From a2f87682f9526af099f40f8e2b31d2bae140e861 Mon Sep 17 00:00:00 2001 From: Jan Kolena Date: Tue, 16 Nov 2021 13:38:07 +0100 Subject: [PATCH 1/3] Big-Bang. Everything refactored and cleaned. API mostly the same. Some changes: - Reworked PoisonedMessageHandler. - Splitting tests into multiple. - Readmes update - Composition over inheritance: ConsumerBase is now a helper class, instead of a trait - F-logging, declarations in F, implicit propagation of CorrelationId - Removed extras-cactus - Remove old migration guides - Extract Channel-related stuff from ConsumerBase to ConsumerChannelOps Please see the migration guide. --- .github/workflows/ci.yml | 4 +- .github/workflows/release.yml | 4 +- Migration-5-6.md | 33 - Migration-6-6_1.md | 9 - Migration-6_1-7.md | 15 - Migration-8-9.md | 34 + README.md | 384 +++++++--- .../rabbitmq/api/CorrelationIdStrategy.scala | 58 ++ .../clients/rabbitmq/api/DeliveryResult.scala | 9 +- .../rabbitmq/api/RabbitMQConsumer.scala | 2 - .../rabbitmq/api/RabbitMQProducer.scala | 5 +- .../rabbitmq/api/RabbitMQPullConsumer.scala | 2 - .../api/RabbitMQStreamingConsumer.scala | 11 +- build.gradle | 54 +- core/build.gradle | 10 +- .../clients/rabbitmq/ChannelListener.scala | 43 +- .../rabbitmq/ConfirmedDeliveryResult.scala | 24 + .../clients/rabbitmq/ConnectionListener.scala | 39 +- .../avast/clients/rabbitmq/ConsumerBase.scala | 180 +++-- .../clients/rabbitmq/ConsumerChannelOps.scala | 143 ++++ .../clients/rabbitmq/ConsumerListener.scala | 22 +- .../rabbitmq/ConsumerWithCallbackBase.scala | 172 ++--- .../DefaultRabbitMQClientFactory.scala | 697 ++++++++---------- .../rabbitmq/DefaultRabbitMQConnection.scala | 162 ++-- .../rabbitmq/DefaultRabbitMQConsumer.scala | 73 +- .../rabbitmq/DefaultRabbitMQProducer.scala | 104 +-- .../DefaultRabbitMQPullConsumer.scala | 98 +-- .../DefaultRabbitMQStreamingConsumer.scala | 386 +++++----- .../clients/rabbitmq/DeliveryContext.scala | 48 ++ .../rabbitmq/MultiFormatConsumer.scala | 61 +- .../rabbitmq/PoisonedMessageHandler.scala | 150 ++++ .../clients/rabbitmq/RabbitMQConnection.scala | 256 ++++--- .../clients/rabbitmq/RepublishStrategy.scala | 95 +-- .../clients/rabbitmq/configuration.scala | 31 +- .../logging/ImplicitContextLogger.scala | 48 ++ .../rabbitmq/logging/LoggingContext.scala | 5 + .../clients/rabbitmq/logging/package.scala | 14 + .../com/avast/clients/rabbitmq/rabbitmq.scala | 16 +- core/src/test/resources/application.conf | 167 ++++- core/src/test/resources/logback.xml | 2 +- .../{LiveTest.scala => BasicLiveTest.scala} | 438 ++--------- .../DefaultRabbitMQConsumerTest.scala | 330 +++++---- .../DefaultRabbitMQProducerTest.scala | 133 +++- .../DefaultRabbitMQPullConsumerTest.scala | 173 ++--- .../rabbitmq/MultiFormatConsumerTest.scala | 72 +- .../PoisonedMessageHandlerLiveTest.scala | 339 +++++++++ .../rabbitmq/PoisonedMessageHandlerTest.scala | 154 ++++ .../rabbitmq/RepublishStrategyTest.scala | 69 +- .../rabbitmq/StreamingConsumerLiveTest.scala | 479 ++++++++++++ .../com/avast/clients/rabbitmq/TestBase.scala | 19 +- .../rabbitmq/TestDeliveryContext.scala | 21 + .../avast/clients/rabbitmq/TestHelper.scala | 9 - .../avast/clients/rabbitmq/TestMonitor.scala | 39 + extras-cactus/README.md | 18 - extras-cactus/build.gradle | 10 - .../extras/format/GpbDeliveryConverter.scala | 54 -- .../rabbitmq/extras/format/GpbParser.scala | 21 - .../extras/format/GpbProductConverter.scala | 60 -- extras-protobuf/build.gradle | 2 +- .../extras/format/ProtobufConsumer.scala | 9 +- .../extras/format/ProtobufConsumerTest.scala | 21 +- 
extras-scalapb/build.gradle | 1 + .../extras/format/ScalaPBConsumer.scala | 9 +- .../extras/format/ScalaPBConsumerTest.scala | 20 +- extras/README.md | 63 -- extras/build.gradle | 2 + .../clients/rabbitmq/extras/HealthCheck.scala | 64 +- .../extras/PoisonedMessageHandler.scala | 87 --- .../StreamingPoisonedMessageHandler.scala | 60 -- extras/src/test/resources/application.conf | 258 +++++++ extras/src/test/resources/logback.xml | 18 + .../DefaultPoisonedMessageHandlerTest.scala | 70 -- .../StreamingPoisonedMessageHandlerTest.scala | 88 --- .../clients/rabbitmq/extras/TestBase.scala | 36 +- .../clients/rabbitmq/extras/TestHelper.scala | 120 +++ .../clients/rabbitmq/extras/TestMonitor.scala | 39 + gradle/wrapper/gradle-wrapper.properties | 2 +- pureconfig/README.md | 4 +- pureconfig/build.gradle | 4 +- .../pureconfig/ConfigRabbitMQConnection.scala | 47 +- .../pureconfig/PureconfigImplicits.scala | 21 + .../rabbitmq/pureconfig/pureconfig.scala | 23 +- settings.gradle | 1 - 83 files changed, 4360 insertions(+), 2817 deletions(-) delete mode 100644 Migration-5-6.md delete mode 100644 Migration-6-6_1.md delete mode 100644 Migration-6_1-7.md create mode 100644 Migration-8-9.md create mode 100644 api/src/main/scala/com/avast/clients/rabbitmq/api/CorrelationIdStrategy.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/ConfirmedDeliveryResult.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/ConsumerChannelOps.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/DeliveryContext.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/PoisonedMessageHandler.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/logging/ImplicitContextLogger.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/logging/LoggingContext.scala create mode 100644 core/src/main/scala/com/avast/clients/rabbitmq/logging/package.scala rename core/src/test/scala/com/avast/clients/rabbitmq/{LiveTest.scala => BasicLiveTest.scala} (51%) create mode 100644 core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerLiveTest.scala create mode 100644 core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerTest.scala create mode 100644 core/src/test/scala/com/avast/clients/rabbitmq/StreamingConsumerLiveTest.scala create mode 100644 core/src/test/scala/com/avast/clients/rabbitmq/TestDeliveryContext.scala create mode 100644 core/src/test/scala/com/avast/clients/rabbitmq/TestMonitor.scala delete mode 100644 extras-cactus/README.md delete mode 100644 extras-cactus/build.gradle delete mode 100644 extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbDeliveryConverter.scala delete mode 100644 extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbParser.scala delete mode 100644 extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbProductConverter.scala delete mode 100644 extras/src/main/scala/com/avast/clients/rabbitmq/extras/PoisonedMessageHandler.scala delete mode 100644 extras/src/main/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandler.scala create mode 100644 extras/src/test/resources/application.conf create mode 100644 extras/src/test/resources/logback.xml delete mode 100644 extras/src/test/scala/com/avast/clients/rabbitmq/extras/DefaultPoisonedMessageHandlerTest.scala delete mode 100644 extras/src/test/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandlerTest.scala create mode 100644 
extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestHelper.scala create mode 100644 extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestMonitor.scala diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1db8f74c..765b00e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,9 +7,9 @@ jobs: strategy: fail-fast: false matrix: - scala-version: [2.12.13, 2.13.5] + scala-version: [ 2.12.13, 2.13.8 ] steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3.0.0 with: fetch-depth: 100 - name: Fetch tags diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2a6472f0..8a47b7a1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - scala-version: [2.12.13, 2.13.5] + scala-version: [ 2.12.13, 2.13.8 ] steps: - uses: actions/checkout@v2.4.0 with: @@ -24,4 +24,4 @@ jobs: SIGNING_KEY: ${{ secrets.SIGNING_KEY }} SIGNING_PASSWORD: ${{ secrets.SIGNING_PASSWORD }} ORG_GRADLE_PROJECT_sonatypeUsername: ${{ secrets.SONATYPE_USERNAME }} - ORG_GRADLE_PROJECT_sonatypePassword : ${{ secrets.SONATYPE_PASSWORD }} + ORG_GRADLE_PROJECT_sonatypePassword: ${{ secrets.SONATYPE_PASSWORD }} diff --git a/Migration-5-6.md b/Migration-5-6.md deleted file mode 100644 index b03871d8..00000000 --- a/Migration-5-6.md +++ /dev/null @@ -1,33 +0,0 @@ -## Migration from 5.x to 6.0.x - -Common changes: -1. GroupId of all artifacts has changed from `com.avast.clients` to `com.avast.clients.rabbitmq`. -1. The library doesn't use [Lyra library](https://github.com/jhalterman/lyra) anymore. Removing the Lyra resulted into change of listeners. -1. The Kluzo usage is configurable per connection (not for every consumer and producer separately) - -Changes in Scala API: - -1. `RabbitMQFactory` was renamed to `RabbitMQConnection`. It's factory method returns `DefaultRabbitMQConnection` and requires an -`ExecutorService` to be passed (was optional before). -1. The whole API is _finally tagless_ - all methods now return `F[_]`. See [related section](README.md#scala-usage) in docs. -1. The API now uses type-conversions - provide type and related converter when creating producer/consumer. -See [related section](README.md#providing-converters-for-producer/consumer) in docs. -1. The `Delivery` is now sealed trait - there are `Delivery.Ok[A]` (e.g. `Delivery[Bytes]`, depends on type-conversion) and `Delivery.MalformedContent`. -After getting the `Delivery[A]` you should pattern-match it. -1. The API of 6.0.x now requires an implicit `monix.execution.Scheduler` instead of `ExecutionContext` -1. Methods like `RabbitMQConnection.declareQueue` now return `F[Unit]` (was `Try[Done]` before). -1. Possibility to pass manually created configurations (`ProducerConfig` etc.) is now gone. The only option is to use TypeSafe config. -1. There is no `RabbitMQConsumer.bindTo` method anymore. Use [additional declarations](README.md#additional-declarations-and-bindings) for such thing. -1. There are new methods in [`RabbitMQConnection`](core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala): `newChannel` and `withChannel`. -1. [`RabbitMQPullConsumer`](README.md#pull-consumer) was added - -Changes in Java API: - -1. `RabbitMQFactory` was renamed to `RabbitMQJavaConnection` -1. `RabbitMQJavaConnection.newBuilder` requires an `ExecutorService` to be passed (was optional before) -1. Possibility to pass manually created configurations (`ProducerConfig` etc.) is now gone. 
The only option is to use TypeSafe config. -1. Methods like `RabbitMQJavaConnection.declareQueue` now return `CompletableFuture[Void]` (was `void` before) - ***it's not blocking anymore!*** -1. Method `RabbitMQProducer.send` now returns `CompletableFuture[Void]` (was `void` before) - ***it's not blocking anymore!*** -1. `RabbitMQConsumer` and `RabbitMQProducer` (`api` module) are now traits and have their `Default*` counterparts in `core` module -1. There is no `RabbitMQConsumer.bindTo` method anymore. Use [additional declarations](README.md#additional-declarations-and-bindings) for such thing. -1. `RabbitMQPullConsumer` was added diff --git a/Migration-6-6_1.md b/Migration-6-6_1.md deleted file mode 100644 index d2d5d58c..00000000 --- a/Migration-6-6_1.md +++ /dev/null @@ -1,9 +0,0 @@ -## Migration from 6.0.x to 6.1.x - -Removed Kluzo functionality. - -Changes in Scala API: - -1. API doesn't require `FromTask`/`ToTask` instance; requires `Effect[F]` instead - not possible to use `Future` directly, see -[Using own non-Efect F](README.md#using-own-non-effect-f) -1. The API of 6.1.x now requires an implicit `ExecutionContext` diff --git a/Migration-6_1-7.md b/Migration-6_1-7.md deleted file mode 100644 index 9ddb2fd8..00000000 --- a/Migration-6_1-7.md +++ /dev/null @@ -1,15 +0,0 @@ -## Migration from 6.1.x to 7.0.x - -Common changes: - -1. Additional declarations: `bindQueue` now has more consistent API. The only change is `bindArguments` were renamed to `arguments`. -1. You are able to specify network recovery strategy. -1. You are able to specify timeout log level. - -Changes in Scala API: - -1. API is fully effectful; all methods return `F[_]` instance (including initialization and closing) - -Changes in Java API.: - -1. You can specify `initTimeout` in the connection builder. \ No newline at end of file diff --git a/Migration-8-9.md b/Migration-8-9.md new file mode 100644 index 00000000..fafe837d --- /dev/null +++ b/Migration-8-9.md @@ -0,0 +1,34 @@ +## Migration from 8.x.x to 9.0.x + +In general, there are MANY changes under the hood, however, only a few changes in the API. The most relevant changes are: + +1. `extras-cactus` module was removed. +2. `StreamedDelivery` has now only new `handleWith` method, forcing the user to correct behavior and enabling much more effective tasks + processing and canceling. +3. The client now explicitly supports CorrelationId handling. + Producer has `implicit cidStrategy: CorrelationIdStrategy` parameter (with `CorrelationIdStrategy.FromPropertiesOrRandomNew` as a + default) which enables you to configure how the CorrelationId should be derived/generated. You can implement your own _strategy_ to suit + your needs. + You can also easily get the CorrelationId from message properties on the consumer side. The CID (on consumer side) is taken from both + AMQP properties and `X-Correlation-Id` header (where the property has precedence and the header is just a fallback). It's propagated to + logs in logging context (not in message directly). +4. [`PoisonedMessageHandler`](README.md#poisoned-message-handler) is now a part of the core. It's not wrapping your _action_ anymore, + however, there exists only a few options, describable in configuration. +5. `DeliveryResult.Republish` now has `countAsPoisoned` parameter (defaults to `true`) determining whether the PMH (if configured) should + count the attempt or not. +6. Logging in general was somewhat _tuned_. +7. 
The client uses the Cats-Effect version of `Monitor` from the [Metrics library](https://github.com/avast/metrics).
+
+As there are only very minor changes in the API between versions 8 and 9, version 8 won't be supported anymore (unless necessary).
+
+Some additional notes, mainly for library developers:
+
+1. The library is still cross-compiled to Scala 2.12 and 2.13.
+2. All consumers are now free of boilerplate code - the new `ConsumerBase` class is a dependency of all consumers.
+3. `DefaultRabbitMQClientFactory` was redesigned and is now a class.
+4. "Live" tests were split into `BasicLiveTest`, `StreamingConsumerLiveTest` and `PoisonedMessageHandlerLiveTest`.
+
+---
+
+The client now (9.0.0) uses circe 0.14.1 (only [extras-circe module](extras-circe)), pureconfig 0.17.1
+(only [pureconfig module](pureconfig)), still cats 2 and cats-effect 2.
diff --git a/README.md b/README.md
index 4d7851aa..a431e582 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,17 @@
# RabbitMQ client

[![CI](https://github.com/avast/rabbitmq-scala-client/actions/workflows/ci.yml/badge.svg)](https://github.com/avast/rabbitmq-scala-client/actions/workflows/ci.yml) [![Version](https://badgen.net/maven/v/maven-central/com.avast.clients.rabbitmq/rabbitmq-client-api_2.13)](https://repo1.maven.org/maven2/com/avast/clients/rabbitmq/rabbitmq-client-api_2.13/)

-This client is Scala wrapper over the standard [RabbitMQ Java client](https://www.rabbitmq.com/java-client.html). Goal of this library is
-to simplify basic use cases - to provide FP-oriented API for programmers and to shadow the programmer from an underlying client.
+This client is a Scala wrapper over the standard [RabbitMQ Java client](https://www.rabbitmq.com/java-client.html). The goal of this library is to
+simplify basic use cases - to provide an FP-oriented API for programmers and to shield the programmer from the underlying client.

The library is configurable both by case classes (`core` module) and by HOCON/Lightbend `Config` (`pureconfig` module).

The library uses concept of _connection_ and derived _producers_ and _consumers_. Note that the _connection_ shadows you from the underlying
-concept of AMQP connection and derived channels - it handles channels automatically according to best practises. Each _producer_ and _consumer_
+concept of AMQP connection and derived channels - it handles channels automatically according to best practices. Each _producer_
+and _consumer_
can be closed separately while closing _connection_ causes closing all derived channels and all _producers_ and _consumers_.

## Dependency
+
SBT:
`"com.avast.clients.rabbitmq" %% "rabbitmq-client-core" % "x.x.x"`

Gradle:
@@ -21,18 +23,20 @@
1. [api](api) - Contains only basic traits for consumer etc.
1. [core](core) - Main module. The client, configurable by case classes.
1. [pureconfig](pureconfig) - Module for configuration from [`Config`](https://github.com/lightbend/config).
-1. [extras](extras/README.md) - Module with some extra feature.
-1. [extras-circe](extras-circe/README.md) Allows to publish and consume JSON events, using [the circe library](https://circe.github.io/circe/).
-1. [extras-cactus](extras-cactus/README.md) Allows to publish and consume Protobuf events, dusing [the cactus library](https://github.com/avast/cactus) that provides mapping between Java generated classes and Scala classes.
-1. 
[extras-protobuf](extras-protobuf/README.md) Allows to publish and consume events defined as [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) messages (as both JSON and Protobuf), represented as standard Java classes. -1. [extras-scalapb](extras-scalapb/README.md) Allows to publish and consume events defined as [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) messages (as both JSON and Protobuf), generated to Scala using [ScalaPB](https://scalapb.github.io/). +1. [extras](extras/README.md) - Module with some extra features. +1. [extras-circe](extras-circe/README.md) Allows to publish and consume JSON events, + using [the circe library](https://circe.github.io/circe/). +1. [extras-protobuf](extras-protobuf/README.md) Allows to publish and consume events defined + as [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) messages (as both JSON and Protobuf), represented as + standard Java classes. +1. [extras-scalapb](extras-scalapb/README.md) Allows to publish and consume events defined + as [Google Protocol Buffers](https://developers.google.com/protocol-buffers/) messages (as both JSON and Protobuf), generated to Scala + using [ScalaPB](https://scalapb.github.io/). ## Migration -There is a [migration guide](Migration-5-6.md) between versions 5 and 6.0.x. -There is a [migration guide](Migration-6-6_1.md) between versions 6.0.x and 6.1.x. -There is a [migration guide](Migration-6_1-7.md) between versions 6.1.x and 7.0.x. -There is a [migration guide](Migration-6_1-8.md) between versions 6.1.x and 8.0.x. +There exists a [migration guide](Migration-6_1-8.md) between versions 6.1.x and 8.0.x. +There exists a [migration guide](Migration-8-9.md) between versions 8.x and 9.0.x. Please note that configuration from Typesafe/Lightbend config has been moved to [pureconfig module](pureconfig) since 8.x. @@ -43,10 +47,15 @@ The API is _finally tagless_ (read more e.g. [here](https://www.beyondthelines.n [manage resources in your app](https://typelevel.org/cats-effect/tutorial/tutorial.html#acquiring-and-releasing-resources). In addition, there is a support for [streaming](#streaming-support) with [`fs2.Stream`](https://fs2.io/). -The API uses conversions for both consumer and producer, that means you don't have to work directly with `Bytes` (however you -still can if you want to) and you touch only your business model class which is then (de)serialized using provided converter. +The API uses conversions for both consumer and producer, that means you don't have to work directly with `Bytes` (however you still can if +you want to) and you touch only your business model class which is then (de)serialized using provided converter. + +Monitoring of the library is done via [Avast Metrics library](https://github.com/avast/metrics#scala-effect-api), its Scala Effect API in +particular. If you don't want the client to be monitored, feel free to pass `Monitor.noOp[F]` instead. + +The library uses two types of executors - one is for blocking (IO) operations and the second for callbacks. You _have to_ provide both of +them: -The library uses two types of executors - one is for blocking (IO) operations and the second for callbacks. You _have to_ provide both of them: 1. Blocking executor as `ExecutorService` 1. 
Callback executor as `scala.concurrent.ExecutionContext` @@ -62,7 +71,7 @@ import cats.effect.Resource import com.avast.bytes.Bytes import com.avast.clients.rabbitmq._ import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor +import com.avast.metrics.scalaeffectapi.Monitor import javax.net.ssl.SSLContext import monix.eval._ import monix.execution.Scheduler @@ -75,81 +84,85 @@ val blockingExecutor: ExecutorService = ??? val sslContext = SSLContext.getDefault val connectionConfig = RabbitMQConnectionConfig( - hosts = List("localhost:5432"), - name = "MyProductionConnection", - virtualHost = "/", - credentials = CredentialsConfig(username = "vogon", password = "jeltz") - ) + hosts = List("localhost:5432"), + name = "MyProductionConnection", + virtualHost = "/", + credentials = CredentialsConfig(username = "vogon", password = "jeltz") +) val consumerConfig = ConsumerConfig( - name = "MyConsumer", - queueName = "QueueWithMyEvents", - bindings = List( - AutoBindQueueConfig(exchange = AutoBindExchangeConfig(name = "OtherAppExchange"), routingKeys = List("TheEvent")) - ) + name = "MyConsumer", + queueName = "QueueWithMyEvents", + bindings = List( + AutoBindQueueConfig(exchange = AutoBindExchangeConfig(name = "OtherAppExchange"), routingKeys = List("TheEvent")) ) +) val producerConfig = ProducerConfig( - name = "MyProducer", - exchange = "MyGreatApp" - ) + name = "MyProducer", + exchange = "MyGreatApp" +) // see https://typelevel.org/cats-effect/tutorial/tutorial.html#acquiring-and-releasing-resources val rabbitMQProducer: Resource[Task, RabbitMQProducer[Task, Bytes]] = { - for { - connection <- RabbitMQConnection.make[Task](connectionConfig, blockingExecutor, Some(sslContext)) - /* + for { + connection <- RabbitMQConnection.make[Task](connectionConfig, blockingExecutor, Some(sslContext)) + /* Here you have created the connection; it's shared for all producers/consumers amongst one RabbitMQ server - they will share a single TCP connection but have separated channels. If you expect very high load, you can use separate connections for each producer/consumer, however it's usually not needed. - */ + */ - consumer <- connection.newConsumer[Bytes](consumerConfig, monitor) { - case delivery: Delivery.Ok[Bytes] => - Task.now(DeliveryResult.Ack) + consumer <- connection.newConsumer[Bytes](consumerConfig, monitor) { + case delivery: Delivery.Ok[Bytes] => + Task.now(DeliveryResult.Ack) - case _: Delivery.MalformedContent => - Task.now(DeliveryResult.Reject) - } - - producer <- connection.newProducer[Bytes](producerConfig, monitor) - } yield { - producer + case _: Delivery.MalformedContent => + Task.now(DeliveryResult.Reject) } + + producer <- connection.newProducer[Bytes](producerConfig, monitor) + } yield { + producer } +} ``` -#### Streaming support +### Streaming support + +_Note: this has nothing to do with the RabbitMQ Streams. This client is about providing `fs2.Stream` API instead of the callback-based one +but works on top of a normal queue._ It seems quite natural to process RabbitMQ queue with a streaming app. -[`StreamingRabbitMQConsumer`](core/src/main/scala/com/avast/clients/rabbitmq/StreamingRabbitMQConsumer.scala) provides you an +[`StreamingRabbitMQConsumer`](api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala) provides you an [`fs2.Stream`](https://fs2.io/) through which you can easily process incoming messages in a streaming way. 
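+For reference, the whole streaming surface after this change is tiny - this is the complete consumer API from [RabbitMQStreamingConsumer.scala](api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala) in this very patch:
+
+```scala
+trait RabbitMQStreamingConsumer[F[_], A] {
+  def deliveryStream: fs2.Stream[F, StreamedDelivery[F, A]]
+}
+
+trait StreamedDelivery[F[_], A] {
+  def handleWith(f: Delivery[A] => F[DeliveryResult]): F[Unit]
+}
+```
+
+Note the `handleWith` method: instead of inspecting a delivery and calling a separate `handle`, you pass in the whole processing function - this is what enables the more effective task processing and canceling mentioned in the migration guide.
+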
-Notice: Using this functionality requires you to know some basics of [FS2](https://fs2.io/guide.html#overview) library. Please see it's official
-guide if you're not familiar with it first.
+Notice: Using this functionality requires you to know some basics of the [FS2](https://fs2.io/guide.html#overview) library. Please see its
+official guide if you're not familiar with it first.

```scala
// skipping imports and common things, they are the same as in general example above

val consumerConfig = StreamingConsumerConfig( // notice: StreamingConsumerConfig vs. ConsumerConfig
-    name = "MyConsumer",
-    queueName = "QueueWithMyEvents",
-    bindings = List(
-      AutoBindQueueConfig(exchange = AutoBindExchangeConfig(name = "OtherAppExchange"), routingKeys = List("TheEvent"))
-    )
+  name = "MyConsumer",
+  queueName = "QueueWithMyEvents",
+  bindings = List(
+    AutoBindQueueConfig(exchange = AutoBindExchangeConfig(name = "OtherAppExchange"), routingKeys = List("TheEvent"))
  )
+)

-val processMyStream: fs2.Pipe[Task, StreamedDelivery[Task, Bytes], StreamedResult] = { in =>
-  in.evalMap(delivery => delivery.handle(DeliveryResult.Ack)) // TODO you probably want to do some real stuff here
-  }
+val processMyStream: fs2.Pipe[Task, StreamedDelivery[Task, Bytes], Unit] = { in =>
+  in.evalMap(_.handleWith(d => Task.now(DeliveryResult.Ack))) // TODO you probably want to do some real stuff here
+}

-val deliveryStream: Resource[Task, fs2.Stream[Task, StreamedResult]] = for {
+val deliveryStream: Resource[Task, fs2.Stream[Task, Unit]] = {
+  for {
    connection <- RabbitMQConnection.make[Task](connectionConfig, blockingExecutor, Some(sslContext))
    streamingConsumer <- connection.newStreamingConsumer[Bytes](consumerConfig, monitor)
  } yield {
    val stream: fs2.Stream[Task, Unit] = streamingConsumer.deliveryStream.through(processMyStream)
-
+    // create resilient (self-restarting) stream; see more information below
    lazy val resilientStream: fs2.Stream[Task, Unit] = stream.handleErrorWith { e =>
      // TODO log the error - something is going wrong!
@@ -158,19 +171,23 @@ val deliveryStream: Resource[Task, fs2.Stream[Task, StreamedResult]] = for {
    resilientStream
  }
+}
```

-##### Resilient stream
+#### Resilient stream

-While you should never ever let the stream fail (handle all your possible errors; see [Error handling](https://fs2.io/guide.html#error-handling)
-section in official docs how the stream can be failed), it's important you're able to recover the stream when it accidentally happens.
-You can do that by simply _requesting_ a new stream from the client:
+While you should never ever let the stream fail (handle all your possible errors;
+see [Error handling](https://fs2.io/guide.html#error-handling)
+section in the official docs on how the stream can fail), it's important you're able to recover the stream when it accidentally happens. You
+can do that by simply _requesting_ a new stream from the client:

```scala
val stream = streamingConsumer.deliveryStream // get stream from client
  .through(processMyStream) // "run" the stream through your processing logic

-lazy val resilientStream: fs2.Stream[Task, StreamedResult] = stream.handleErrorWith { err =>
+val failureCounter: Ref[Task, Int] = ??? // TODO: initialize to max recover count!
+
+lazy val resilientStream: fs2.Stream[Task, Unit] = stream.handleErrorWith { err =>
  // handle the error in stream: recover by calling itself
  // TODO don't forget to add some logging/metrics here!
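+  // what the next line does: atomically decrement the remaining-attempts counter;
+  // `modify` returns the new value, so `attemptsRest` is the number of recovery attempts still allowed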
  fs2.Stream.eval(failureCounter.modify(a => (a - 1, a - 1))).flatMap { attemptsRest =>
@@ -182,6 +199,7 @@ resilientStream
```

or use a prepared extension method:
+
```scala
import com.avast.clients.rabbitmq._

@@ -195,22 +213,105 @@ streamingConsumer.deliveryStream // get stream from client
}
```

-Please refer to the [official guide](https://fs2.io/guide.html#overview) for understanding more deeply how the recovery of `fs2.Stream` works.
+Please refer to the [official guide](https://fs2.io/guide.html#overview) to understand more deeply how the recovery of `fs2.Stream`
+works.
+
+### Producer/consumer listeners
+
+While everyone wants RabbitMQ to "just work", in reality, it may not be that easy. Servers are restarted, deliveries are processed for
+too long etc. For such occasions, there exist _listeners_ in this client - of _connection_, _channel_ and _consumer_ kinds.
+The listeners are passed to the connection factory method. You are not required to implement/provide them, however, it's strongly
+recommended to do so. The default implementations only log the events, while you may want to react differently - increase some
+counter, mark the app as _unhealthy_ etc. (as some events are not easy to recover from).

-#### Providing converters for producer/consumer
+### Providing converters for producer/consumer

Both the producer and consumer require a type argument when creating from _connection_:
+
1. `connection.newConsumer[MyClass]` which requires implicit `DeliveryConverter[MyClass]`
1. `connection.newProducer[MyClass]` which requires implicit `ProductConverter[MyClass]`

There are multiple options where to get the _converter_ (it's the same case for `DeliveryConverter` as for `ProductConverter`):
+
1. Implement your own implicit _converter_ for the type
-1. Modules [extras-circe](extras-circe/README.md) and [extras-cactus](extras-cactus/README.md) provide support for JSON and GPB conversion.
+1. Modules [extras-circe](extras-circe/README.md) and [extras-scalapb](extras-scalapb/README.md) provide support for JSON and GPB
+   conversion.
1. Use `identity` converter by specifying `Bytes` type argument. No further action needed in that case.

-#### Caveats
+### Poisoned message handler
+
+It's quite a common use-case: we want to republish a failed message but want to avoid having the message republished forever. You can use
+the `PoisonedMessageHandler` (PMH) to solve this issue. It counts the number of attempts and won't let the message be republished again and
+again (above the limit you set).
+_Note: it works ONLY for `Republish` and not for `Retry`!_
+
+The `PoisonedMessageHandler` is built into both the "normal" and streaming consumers. After the execution of the poisoned-message action,
+the delivery is REJECTed (so it's not in the original queue anymore).
+
+All types (except no-op) of the poisoned message handler have a `maxAttempts` configuration option which determines how many times the message
+can be delivered to the consumer. What it means in practice is that if `maxAttempts == 3` and you choose to republish it for the third time,
+the PMH takes its action - as the next delivery of the message would already be the fourth, which is over the configured limit.
+
+Internally, the attempts counting is done via incrementing (or adding, the first time) the `X-Republish-Count` header in the message. Feel
+free to use its value for your own logging or whatever.
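+
+For illustration, a minimal sketch of reading the counter on the consumer side - the header access mirrors the consumer examples above, while the parsing of its value (and the threshold) is an assumption of this sketch:
+
+```scala
+case delivery: Delivery.Ok[Bytes] =>
+  Task {
+    // 0 when the header is not present yet, i.e. on the first delivery
+    val republishCount = delivery.properties.headers.get("X-Republish-Count").map(_.toString.toInt).getOrElse(0)
+    // use the value for your own logging/metrics, then decide as usual
+    if (republishCount >= 3) DeliveryResult.Reject else DeliveryResult.Republish()
+  }
+```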
You can even set it to your own value - just bear in mind that you might affect the +PMH's functionality (of course, that might be your intention). + +It can happen that you know that you have PMH configured, and you need to republish the message and "not count the attempt" (the typical +scenario is that the message processing has failed and it's not fault of your app but of some 3rd party system which you count on to be +recovered later). There exists the new `countAsPoisoned` parameter now (defaults to `true`) determining whether the PMH (if configured) +should count the attempt or not. This is an easy and clean way how to influence the PMH behavior. + +#### Dead-queue poisoned message handler + +The most common and useful type, which will take all "poisoned" messages and publish them to a queue of your choice. +In its configuration, you basically configure a _producer_ which is used to send the message into the dead-queue. While the producer will +create its exchange it publishes to, *you are responsible for creating the queue and binding* to the producer's exchange. You can use the +[additional declaration/bindings functionality](#additional-declarations-and-bindings) of the client. If you forget to do so, your messages +will be lost completely. + +#### Logging poisoned message handler + +As the name suggests, this PMH only logs the poisoned message before it's thrown away (and lost forever). + +#### No-op poisoned message handler + +This PMH does nothing. + +#### Example HOCON configuration + +Please mind that the PMH is *only* responsible for publishing the poisoned messages, not for declaring/binding the queue where they'll end! + +```hocon +myConsumer { + name = "MyVeryImportantConsumer" + + // ... + // the usual stuff for consumer - timeout, bindings, ... + // ... + + poisonedMessageHandling { + type = "deadQueue" // deadqueue, logging, noop (default noop) + + maxAttempts = 2 // <-- required for deadqueue and logging types + + // required only for deadQueue type: + deadQueueProducer { + routingKey = "dead" + name = "DeadQueueProducer" + exchange = "EXCHANGE3" + declare { + enabled = true + type = "direct" + } + } + } +} +``` + +### Caveats + 1. `null` instead of converter instance - It may happen you run in this problem: + It may happen you run in this problem: ```scala scala> import io.circe.generic.auto._ import io.circe.generic.auto._ @@ -233,56 +334,66 @@ There are multiple options where to get the _converter_ (it's the same case for scala> implicit val deliveryConverter = JsonDeliveryConverter.derive[Event]() deliveryConverter: com.avast.clients.rabbitmq.extras.format.JsonDeliveryConverter[Event] = com.avast.clients.rabbitmq.extras.format.JsonDeliveryConverter$$anon$1@4b024fb2 ``` - Notice the results of last three calls **differ** even though they are supposed to be the same (non-null respectively)! A very similar issue - is discussed on the [StackOverflow](https://github.com/circe/circe/issues/380) and so is similar the solution: + Notice the results of last three calls **differ** even though they are supposed to be the same (non-null respectively)! A very similar + issue is discussed on the [StackOverflow](https://github.com/circe/circe/issues/380) and so is similar the solution: 1. Remove explicit type completely (not recommended) 1. Make the explicit type more general (`DeliveryConverter` instead of `JsonDeliveryConverter` in this case) ## Notes ### Extras + There is a module with some optional functionality called [extras](extras/README.md). 
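+
+Relating to the converter caveat above (and since [extras-circe](extras-circe/README.md) is one of these extras), a minimal sketch of the safe converter declaration for the `Event` case class from the caveat - explicit, but with the general type:
+
+```scala
+import com.avast.clients.rabbitmq._
+import com.avast.clients.rabbitmq.extras.format._
+import io.circe.generic.auto._ // derives io.circe.Decoder[Event]
+
+// the general type ascription avoids the `null` self-reference described in the Caveats section
+implicit val deliveryConverter: DeliveryConverter[Event] = JsonDeliveryConverter.derive[Event]()
+```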
### Network recovery
+
The library offers configurable network recovery, with the functionality itself backed by RabbitMQ client's one (ready in 5+).
You can either disable the recovery or select (and configure) one of the following types:
+
1. Linear
-   The client will wait `initialDelay` for first recovery attempt and if it fails, will try it again each `period` until it succeeds.
+   The client will wait `initialDelay` for the first recovery attempt and if it fails, will try it again each `period` until it succeeds.
1. Exponential
-   The client will wait `initialDelay` for first recovery attempt and if it fails, will try it again until it succeeds and prolong the
-   delay between each two attempts exponentially (based on `period`, `factor`, attempt number), up to `maxLength`.
-   Example:
-   For `initialDelay = 3s, period = 2s, factor = 2.0, maxLength = 1 minute`, produced delays will be 3, 2, 4, 8, 16, 32, 60 seconds
-   (and it will never go higher).
-
-Do not set too short custom recovery delay intervals (less than 2 seconds) as it is not recommended by the official [RabbitMQ API Guide](https://www.rabbitmq.com/api-guide.html#automatic-recovery-limitations).
+   The client will wait `initialDelay` for the first recovery attempt and if it fails, will try it again until it succeeds and prolong the delay
+   between each two attempts exponentially (based on `period`, `factor`, attempt number), up to `maxLength`.
+   Example:
+   For `initialDelay = 3s, period = 2s, factor = 2.0, maxLength = 1 minute`, produced delays will be 3, 2, 4, 8, 16, 32, 60 seconds
+   (and it will never go higher).
+
+Do not set custom recovery delay intervals that are too short (less than 2 seconds), as that is not recommended by the
+official [RabbitMQ API Guide](https://www.rabbitmq.com/api-guide.html#automatic-recovery-limitations).

### DeliveryResult
-The consumers `readAction` returns `Future` of [`DeliveryResult`](api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala). The `DeliveryResult` has 4 possible values
+
+The consumer's `readAction` returns `Future` of [`DeliveryResult`](api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala).
+The `DeliveryResult` has 4 possible values
(descriptions of usual use-cases):
+
1. Ack - the message was processed; it will be removed from the queue
1. Reject - the message is corrupted or for some other reason we don't want to see it again; it will be removed from the queue
1. Retry - the message couldn't be processed at this moment (unreachable 3rd party services?); it will be requeued (inserted on the top of
-the queue)
+   the queue)
1. Republish - the message may be corrupted but we're not sure; it will be re-published to the bottom of the queue (as a new message and the
-original one will be removed). It's usually wise to prevent an infinite republishing of the message - see [Poisoned message handler](extras/README.md#poisoned-message-handler).
+   original one will be removed). It's usually wise to prevent an infinite republishing of the message -
+   see [Poisoned message handler](#poisoned-message-handler).

#### Difference between _Retry_ and _Republish_
-When using _Retry_ the message can effectively cause starvation of other messages in the queue
-until the message itself can be processed; on the other hand _Republish_ inserts the message to the original queue as a new message and it
-lets the consumer handle other messages (if they can be processed).
+ +When using _Retry_ the message can effectively cause starvation of other messages in the queue until the message itself can be processed; on +the other hand _Republish_ inserts the message to the original queue as a new message and it lets the consumer handle other messages (if +they can be processed). #### Republishing -Republishing is solved at application level with publishing a new message (with original content, headers, messageId, etc.) to the original queue and -acknowledging the old one. This can be done via: -1. Default exchange - Every virtual host in RabbitMQ has default exchange which has implicit bindings to all queues and can be easily used for publishing to - basically any queue. This is very handy for functionality such as the republishing however it's also very dangerous and you don't have - permissions to use it. In case you do have them, use this option instead of the custom exchange. - This the default option (in other words, the client will use the default exchange in case you don't tell it not to do so). -1. Custom exchange - In case you're unable to use the default exchange, you have to create your own exchange to replace the functionality. The RabbitMQ client - will create it for you together with all necessary bindings and all you have to do is to just configure a name of the exchange, e.g. + +Republishing is solved at application level with publishing a new message (with original content, headers, messageId, etc.) to the original +queue and acknowledging the old one. This can be done via: + +1. Default exchange Every virtual host in RabbitMQ has default exchange which has implicit bindings to all queues and can be easily used for + publishing to basically any queue. This is very handy for functionality such as the republishing however it's also very dangerous and you + don't have permissions to use it. In case you do have them, use this option instead of the custom exchange. + This the default option (in other words, the client will use the default exchange in case you don't tell it not to do so). +1. Custom exchange In case you're unable to use the default exchange, you have to create your own exchange to replace the functionality. The + RabbitMQ client will create it for you together with all necessary bindings and all you have to do is to just configure a name of the + exchange, e.g. ```hocon rabbitConnection { hosts = ["localhost:5672"] @@ -305,48 +416,56 @@ acknowledging the old one. This can be done via: The exchange is created as _direct_, _durable_ and without _auto-delete_ flag. ### Bind/declare arguments -There is an option to specify bind/declare arguments for queues/exchanges as you may read about at [RabbitMQ docs](https://www.rabbitmq.com/queues.html). + +There is an option to specify bind/declare arguments for queues/exchanges as you may read about +at [RabbitMQ docs](https://www.rabbitmq.com/queues.html). Example of configuration with HOCON: + ```hocon producer { - name = "Testing" // this is used for logging etc. + name = "Testing" // this is used for logging etc. - exchange = "myclient" + exchange = "myclient" - // should the producer declare exchange he wants to send to? - declare { - enabled = true // disabled by default + // should the producer declare exchange he wants to send to? 
+ declare { + enabled = true // disabled by default - type = "direct" // fanout, topic - - arguments = { "x-max-length" : 10000 } - } + type = "direct" // fanout, topic + + arguments = {"x-max-length": 10000} } +} ``` ### Additional declarations and bindings -Sometimes it's necessary to declare an additional queue or exchange which is not directly related to the consumers or producers you have -in your application (e.g. dead-letter queue). + +Sometimes it's necessary to declare an additional queue or exchange which is not directly related to the consumers or producers you have in +your application (e.g. dead-letter queue). The library makes possible to do such thing. Here is example of such configuration with HOCON: + ```scala val rabbitConnection: ConfigRabbitMQConnection[F] = ??? rabbitConnection.bindExchange("backupExchangeBinding") // : F[Unit] ``` + where the "backupExchangeBinding" is link to the configuration (use relative path to the `declarations` block in configuration): + ```hocon - declarations { - backupExchangeBinding { - sourceExchangeName = "mainExchange" - destExchangeName = "backupExchange" - routingKeys = ["myMessage"] - arguments {} - } + declarations { + backupExchangeBinding { + sourceExchangeName = "mainExchange" + destExchangeName = "backupExchange" + routingKeys = ["myMessage"] + arguments {} } +} ``` Equivalent code with using case classes configuration: + ```scala val rabbitConnection: RabbitMQConnection[F] = ??? @@ -359,11 +478,31 @@ rabbitConnection.bindExchange( ) // : F[Unit] ``` +### Correlation-ID handling + +The library supports CorrelationId (sometimes also called TraceId or TracingId) handling out of the box. + +#### Producer + +Producer takes `implicit cidStrategy: CorrelationIdStrategy` parameter which enables you to configure how the CorrelationId should be +derived/generated. You can implement your own _strategy_ to suit your needs.That means that there'll always be "some" CorrelationId going in +the message (since v9). +If you don't specify the strategy by yourself, `CorrelationIdStrategy.FromPropertiesOrRandomNew` is used - it will try to locate the CID in +properties (or headers) and generate a new one if it doesn't succeed. In any way, the CID will be part of both logs and resulting (outgoing) +RabbitMQ message. + +#### Consumers + +You can also get the CorrelationId from the message properties on the consumer side. The CID is taken from both AMQP properties +and `X-Correlation-Id` header (where the property has precedence and the header is just a fallback). + ### Pull consumer + Sometimes your use-case just doesn't fit the _normal_ consumer scenario. Here you can use the _pull consumer_ which gives you much more control over the received messages. You _pull_ new message from the queue and acknowledge (reject, ...) it somewhere in the future. The pull consumer uses `PullResult` as return type: + * Ok - contains `DeliveryWithHandle` instance * EmptyQueue - there was no message in the queue available @@ -393,7 +532,9 @@ val consumer: Resource[Task, RabbitMQPullConsumer[Task, Bytes]] = { val program: Task[Unit] = consumer.use { consumer => Task - .sequence { (1 to 100).map(_ => consumer.pull()) } // receive "up to" 100 deliveries + .sequence { + (1 to 100).map(_ => consumer.pull()) + } // receive "up to" 100 deliveries .flatMap { ds => // do your stuff! 
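+      // for example (a hypothetical sketch - `PullResult.Ok` wraps a `DeliveryWithHandle`, as described above,
+      // and its `handle` method is this sketch's assumption for acking):
+      // ds.collect { case PullResult.Ok(deliveryWithHandle) => deliveryWithHandle.handle(DeliveryResult.Ack) }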
@@ -404,11 +545,11 @@ val program: Task[Unit] = consumer.use { consumer => ### MultiFormatConsumer -Quite often you receive a single type of message but you want to support multiple formats of encoding (Protobuf, Json, ...). -This is where `MultiFormatConsumer` could be used. +Quite often you receive a single type of message but you want to support multiple formats of encoding (Protobuf, Json, ...). This is +where `MultiFormatConsumer` could be used. -Modules [extras-circe](extras-circe/README.md) and [extras-cactus](extras-cactus/README.md) provide support for JSON and GPB conversion. They -are both used in the example below. +Modules [extras-circe](extras-circe/README.md) and [extras-scalapb](extras-scalapb/README.md) provide support for JSON and GPB conversion. +They are both used in the example below. The `MultiFormatConsumer` is Scala only. @@ -425,7 +566,7 @@ import com.avast.clients.rabbitmq.extras.format._ import io.circe.Decoder import io.circe.generic.auto._ // to auto derive `io.circe.Decoder[A]` with https://circe.github.io/circe/codec.html#fully-automatic-derivation import scala.concurrent.Future -import scala.jdk.CollectionConverters._ // <-- for Scala 2.12 use scala.collection.JavaConverters +import scala.jdk.CollectionConverters._ private implicit val d: Decoder[Bytes] = Decoder.decodeString.map(???) @@ -435,15 +576,17 @@ case class NewFileSourceAdded(fileSources: Seq[FileSource]) val consumer = MultiFormatConsumer.forType[Future, NewFileSourceAdded]( JsonDeliveryConverter.derive(), // requires implicit `io.circe.Decoder[NewFileSourceAdded]` - GpbDeliveryConverter[NewFileSourceAddedGpb].derive() // requires implicit `com.avast.cactus.Converter[NewFileSourceAddedGpb, NewFileSourceAdded]` + GpbDeliveryConverter[NewFileSourceAddedGpb] + .derive() // requires implicit `com.avast.cactus.Converter[NewFileSourceAddedGpb, NewFileSourceAdded]` )(_ => ???) ``` + (see [unit test](core/src/test/scala/com/avast/clients/rabbitmq/MultiFormatConsumerTest.scala) for full example) #### Implementing own `DeliveryConverter` The [CheckedDeliveryConverter](core/src/main/scala/com/avast/clients/rabbitmq/converters.scala) is usually reacting to Content-Type (like in -the example below) but it's not required - it could e.g. analyze the payload (or first bytes) too. +the example below) but it's not required - it could e.g. analyze the payload (or first bytes) too. 
```scala import com.avast.bytes.Bytes @@ -452,6 +595,7 @@ import com.avast.clients.rabbitmq.api.{ConversionException, Delivery} val StringDeliveryConverter: CheckedDeliveryConverter[String] = new CheckedDeliveryConverter[String] { override def canConvert(d: Delivery[Bytes]): Boolean = d.properties.contentType.contains("text/plain") + override def convert(b: Bytes): Either[ConversionException, String] = Right(b.toStringUtf8) } ``` diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/CorrelationIdStrategy.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/CorrelationIdStrategy.scala new file mode 100644 index 00000000..3b2c07bd --- /dev/null +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/CorrelationIdStrategy.scala @@ -0,0 +1,58 @@ +package com.avast.clients.rabbitmq.api + +import scala.annotation.implicitNotFound +import scala.util.Random + +@implicitNotFound("You have to specify the CorrelationIdStrategy to proceed") +trait CorrelationIdStrategy { + def toCIDValue: String +} + +object CorrelationIdStrategy { + final val CorrelationIdKeyName: String = "X-Correlation-Id" + + /** + * Generate a new, random CorrelationId. + * + * Example: + * {{{ + * implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.RandomNew + * }}} + */ + case object RandomNew extends CorrelationIdStrategy { + override def toCIDValue: String = randomValue + } + + /** + * Always provide same value. + * + * Example: + * {{{ + * implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.Fixed("my-corr-id") + * }}} + */ + case class Fixed(value: String) extends CorrelationIdStrategy { + override val toCIDValue: String = value + } + + /** + * Try to find the CID in properties (or `X-Correlation-Id` header as a fallback). Generate a new, random one, if nothing was found. + * + * Example: + * {{{ + * val mp = MessageProperties(correlationId = Some(cid)) + * ... + * implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.FromPropertiesOrRandomNew(Some(mp)) + * }}} + */ + case class FromPropertiesOrRandomNew(mp: Option[MessageProperties]) extends CorrelationIdStrategy { + override lazy val toCIDValue: String = { + // take it from properties or from header (as a fallback)... if still empty, generate new + mp.flatMap(p => p.correlationId.orElse(p.headers.get(CorrelationIdKeyName).map(_.toString))).getOrElse(randomValue) + } + } + + private def randomValue: String = { + Random.alphanumeric.take(20).mkString + } +} diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala index 83a909ca..8abcd3e2 100644 --- a/api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/DeliveryResult.scala @@ -13,7 +13,12 @@ object DeliveryResult { /** The message cannot be processed but is worth - it will be requeued to the top of the queue. */ case object Retry extends DeliveryResult - /** The message cannot be processed but is worth - it will be requeued to the bottom of the queue. */ - case class Republish(newHeaders: Map[String, AnyRef] = Map.empty) extends DeliveryResult + /** The message cannot be processed but is worth - it will be requeued to the bottom of the queue. + * + * @param countAsPoisoned Determines whether the PoisonedMessageHandler should count this republish and throw the delivery away if it's reached the limit. + * @param newHeaders Headers to be added to the delivery before requeueing. 
+ * + * */ + case class Republish(countAsPoisoned: Boolean = true, newHeaders: Map[String, AnyRef] = Map.empty) extends DeliveryResult } diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQConsumer.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQConsumer.scala index f53aba58..fa738ffb 100644 --- a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQConsumer.scala +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQConsumer.scala @@ -1,5 +1,3 @@ package com.avast.clients.rabbitmq.api -import scala.language.higherKinds - trait RabbitMQConsumer[F[_]] diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQProducer.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQProducer.scala index 4589680b..01c45b58 100644 --- a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQProducer.scala +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQProducer.scala @@ -1,7 +1,8 @@ package com.avast.clients.rabbitmq.api -import scala.language.higherKinds +import com.avast.clients.rabbitmq.api.CorrelationIdStrategy.FromPropertiesOrRandomNew trait RabbitMQProducer[F[_], A] { - def send(routingKey: String, body: A, properties: Option[MessageProperties] = None): F[Unit] + def send(routingKey: String, body: A, properties: Option[MessageProperties] = None)(implicit cidStrategy: CorrelationIdStrategy = + FromPropertiesOrRandomNew(properties)): F[Unit] } diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQPullConsumer.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQPullConsumer.scala index 7416a069..9e0d6f07 100644 --- a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQPullConsumer.scala +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQPullConsumer.scala @@ -1,7 +1,5 @@ package com.avast.clients.rabbitmq.api -import scala.language.higherKinds - trait RabbitMQPullConsumer[F[_], A] { /** Retrieves one message from the queue, if there is any. diff --git a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala index 2fa53c0a..745b491d 100644 --- a/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala +++ b/api/src/main/scala/com/avast/clients/rabbitmq/api/RabbitMQStreamingConsumer.scala @@ -1,16 +1,9 @@ package com.avast.clients.rabbitmq.api -import scala.language.higherKinds - trait RabbitMQStreamingConsumer[F[_], A] { def deliveryStream: fs2.Stream[F, StreamedDelivery[F, A]] } -trait StreamedDelivery[+F[_], +A] { - def delivery: Delivery[A] - - def handle(result: DeliveryResult): F[StreamedResult] +trait StreamedDelivery[F[_], A] { + def handleWith(f: Delivery[A] => F[DeliveryResult]): F[Unit] } - -sealed trait StreamedResult -object StreamedResult extends StreamedResult diff --git a/build.gradle b/build.gradle index dd843c35..fc86ce81 100644 --- a/build.gradle +++ b/build.gradle @@ -4,7 +4,7 @@ buildscript { } dependencies { classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.18' - classpath 'com.avast.gradle:gradle-docker-compose-plugin:0.14.13' + classpath 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' classpath 'com.github.ben-manes:gradle-versions-plugin:0.39.0' } } @@ -18,6 +18,10 @@ nexusPublishing { repositories { sonatype() } + + transitionCheckOptions { + maxRetries.set(180) // 30 minutes + } } allprojects { @@ -30,18 +34,48 @@ allprojects { scalaVersionFull = scalaVersionEnv != null ? 
scalaVersionEnv : "2.12.13" scalaVersion = "${scalaVersionFull}".split("\\.").dropRight(1).join(".") - metricsVersion = "2.8.6" bytesVersion = "2.2.0" circeVersion = "0.14.1" - cactusVersion = "0.17.0" - catsVersion = "2.4.2" - catsEffectVersion = "2.4.1" + catsVersion = "2.7.0" + catsEffectVersion = "2.5.4" fs2Version = "2.5.3" - protobufVersion = "3.17.3" - scalapbVersion = "0.11.4" + metricsVersion = "2.9.2" + protobufVersion = "3.19.4" + pureconfigVersion = "0.17.1" + scalapbVersion = "0.11.8" scalapbJson4sVersion = "0.11.1" + typesafeConfigVersion = "1.4.2" monixVersion = "3.4.0" // just for tests! } + + /* + * Report failed test at the end of the run: + * */ + + // add a collection to track failedTests + ext.failedTests = [] + + // add a testlistener to all tasks of type Test + tasks.withType(Test) { + afterTest { TestDescriptor descriptor, TestResult result -> + if (result.resultType == TestResult.ResultType.FAILURE) { + failedTests << ["${descriptor.className}::${descriptor.name}"] + } + } + } + + // print out tracked failed tests when the build has finished + gradle.buildFinished { + if (!failedTests.empty) { + println "\n\n----------------------------------------" + println "Failed tests for ${project.name}:" + failedTests.each { failedTest -> + println " - ${failedTest}" + } + println "----------------------------------------" + println "\n" + } + } } println("Scala version: ${scalaVersion} (${scalaVersionFull})") // will display the version once, when configuring root @@ -127,13 +161,12 @@ subprojects { repositories { mavenCentral() - jcenter() } dependencies { - scalaCompilerPlugin "org.typelevel:kind-projector_$scalaVersionFull:0.11.3" + scalaCompilerPlugin "org.typelevel:kind-projector_$scalaVersionFull:0.13.2" - api "org.scala-lang.modules:scala-collection-compat_$scalaVersion:2.4.2" + api "org.scala-lang.modules:scala-collection-compat_$scalaVersion:2.6.0" testImplementation "io.monix:monix_$scalaVersion:$monixVersion" @@ -147,6 +180,7 @@ subprojects { testImplementation 'junit:junit:4.13.2' testImplementation "org.scalatest:scalatest_$scalaVersion:3.0.8" testImplementation 'org.mockito:mockito-all:1.10.19' + testImplementation "com.typesafe.scala-logging:scala-logging_$scalaVersion:3.9.4" testImplementation 'ch.qos.logback:logback-classic:1.2.11' testImplementation 'org.pegdown:pegdown:1.6.0' diff --git a/core/build.gradle b/core/build.gradle index 5d938de8..27a8140f 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -17,28 +17,26 @@ test.doFirst { dependencies { api project(":api") - api "com.avast.metrics:metrics-scala_$scalaVersion:${metricsVersion}" - api "com.typesafe.scala-logging:scala-logging_$scalaVersion:3.9.2" + api "com.avast.metrics:metrics-cats-effect-2_$scalaVersion:${metricsVersion}" + api "org.typelevel:log4cats-slf4j_$scalaVersion:1.4.0" api "com.avast.bytes:bytes-core:${bytesVersion}" - api 'com.rabbitmq:amqp-client:5.14.0' + api 'com.rabbitmq:amqp-client:5.14.2' api "org.typelevel:cats-core_$scalaVersion:$catsVersion" api "org.typelevel:cats-effect_$scalaVersion:$catsEffectVersion" api 'org.xbib:jsr-305:1.0.0' + api "org.scala-lang:scala-reflect:$scalaVersionFull" testImplementation project(":extras") testImplementation project(":extras-circe") - testImplementation project(":extras-cactus") testImplementation project(":extras") testImplementation project(":pureconfig") testImplementation "io.circe:circe-generic_$scalaVersion:$circeVersion" testImplementation "io.circe:circe-generic-extras_$scalaVersion:$circeVersion" - testImplementation 
"com.avast.cactus:cactus-gpbv2_$scalaVersion:$cactusVersion" - testImplementation "com.avast.cactus:cactus-bytes-gpbv2_$scalaVersion:$cactusVersion" testImplementation "com.google.protobuf:protobuf-java:$protobufVersion" } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ChannelListener.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ChannelListener.scala index 02dd504a..cb7daf0d 100755 --- a/core/src/main/scala/com/avast/clients/rabbitmq/ChannelListener.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ChannelListener.scala @@ -1,59 +1,62 @@ package com.avast.clients.rabbitmq +import cats.effect.Sync import com.rabbitmq.client.impl.recovery.RecoveryAwareChannelN import com.rabbitmq.client.{Channel, ShutdownSignalException} -import com.typesafe.scalalogging.StrictLogging +import org.typelevel.log4cats.slf4j.Slf4jLogger -trait ChannelListener { - def onShutdown(cause: ShutdownSignalException, channel: Channel): Unit +trait ChannelListener[F[_]] { + def onShutdown(cause: ShutdownSignalException, channel: Channel): F[Unit] - def onCreate(channel: Channel): Unit + def onCreate(channel: Channel): F[Unit] - def onCreateFailure(failure: Throwable): Unit + def onCreateFailure(failure: Throwable): F[Unit] - def onRecoveryStarted(channel: Channel): Unit + def onRecoveryStarted(channel: Channel): F[Unit] - def onRecoveryCompleted(channel: Channel): Unit + def onRecoveryCompleted(channel: Channel): F[Unit] - def onRecoveryFailure(channel: Channel, failure: Throwable): Unit + def onRecoveryFailure(channel: Channel, failure: Throwable): F[Unit] } object ChannelListener { - final val Default: ChannelListener = new ChannelListener with StrictLogging { - override def onCreate(channel: Channel): Unit = { + def default[F[_]: Sync]: ChannelListener[F] = new ChannelListener[F] { + private val logger = Slf4jLogger.getLoggerFromClass[F](this.getClass) + + override def onCreate(channel: Channel): F[Unit] = { logger.info(s"Channel created: $channel") } - override def onCreateFailure(failure: Throwable): Unit = { - logger.warn(s"Channel was NOT created", failure) + override def onCreateFailure(failure: Throwable): F[Unit] = { + logger.warn(failure)(s"Channel was NOT created") } - override def onRecoveryCompleted(channel: Channel): Unit = { + override def onRecoveryCompleted(channel: Channel): F[Unit] = { logger.info(s"Channel recovered: $channel") } - override def onRecoveryStarted(channel: Channel): Unit = { + override def onRecoveryStarted(channel: Channel): F[Unit] = { logger.debug(s"Channel recovery started: $channel") } - override def onRecoveryFailure(channel: Channel, failure: Throwable): Unit = { + override def onRecoveryFailure(channel: Channel, failure: Throwable): F[Unit] = { channel match { case ch: RecoveryAwareChannelN if !ch.isOpen => val initByApp = Option(ch.getCloseReason).map(_.isInitiatedByApplication).exists(identity) if (initByApp) { - logger.debug(s"Channel could not be recovered, because it was manually closed: $channel", failure) - } else logger.warn(s"Channel recovery failed: $channel", failure) + logger.debug(failure)(s"Channel could not be recovered, because it was manually closed: $channel") + } else logger.warn(failure)(s"Channel recovery failed: $channel") - case _ => logger.warn(s"Channel recovery failed: $channel", failure) + case _ => logger.warn(failure)(s"Channel recovery failed: $channel") } } - override def onShutdown(cause: ShutdownSignalException, channel: Channel): Unit = { + override def onShutdown(cause: ShutdownSignalException, channel: 
Channel): F[Unit] = { if (cause.isInitiatedByApplication) { logger.debug(s"Channel shutdown: $channel") } else { - logger.warn(s"Channel shutdown: $channel", cause) + logger.warn(cause)(s"Channel shutdown: $channel") } } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConfirmedDeliveryResult.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConfirmedDeliveryResult.scala new file mode 100644 index 00000000..bc73aa2f --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConfirmedDeliveryResult.scala @@ -0,0 +1,24 @@ +package com.avast.clients.rabbitmq + +import cats.effect.Concurrent +import cats.effect.concurrent.Deferred +import com.avast.clients.rabbitmq.api.DeliveryResult + +private[rabbitmq] trait ConfirmedDeliveryResult[F[_]] { + def deliveryResult: DeliveryResult + def confirm: F[Unit] + def awaitConfirmation: F[Unit] +} + +private[rabbitmq] object ConfirmedDeliveryResult { + def apply[F[_]: Concurrent](dr: DeliveryResult): ConfirmedDeliveryResult[F] = { + new ConfirmedDeliveryResult[F] { + private val deff = Deferred.unsafe[F, Unit] + + override val deliveryResult: DeliveryResult = dr + + override def confirm: F[Unit] = deff.complete(()) + override def awaitConfirmation: F[Unit] = deff.get + } + } +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConnectionListener.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConnectionListener.scala index cd2feaa4..9f442eb9 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/ConnectionListener.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConnectionListener.scala @@ -1,49 +1,52 @@ package com.avast.clients.rabbitmq +import cats.effect.Sync import com.rabbitmq.client.{Connection, ShutdownSignalException} -import com.typesafe.scalalogging.StrictLogging +import org.typelevel.log4cats.slf4j.Slf4jLogger -trait ConnectionListener { - def onCreate(connection: Connection): Unit +trait ConnectionListener[F[_]] { + def onCreate(connection: Connection): F[Unit] - def onCreateFailure(failure: Throwable): Unit + def onCreateFailure(failure: Throwable): F[Unit] - def onRecoveryStarted(connection: Connection): Unit + def onRecoveryStarted(connection: Connection): F[Unit] - def onRecoveryCompleted(connection: Connection): Unit + def onRecoveryCompleted(connection: Connection): F[Unit] - def onRecoveryFailure(connection: Connection, failure: Throwable): Unit + def onRecoveryFailure(connection: Connection, failure: Throwable): F[Unit] - def onShutdown(connection: Connection, cause: ShutdownSignalException): Unit + def onShutdown(connection: Connection, cause: ShutdownSignalException): F[Unit] } object ConnectionListener { - final val Default: ConnectionListener = new ConnectionListener with StrictLogging { - override def onCreate(connection: Connection): Unit = { + def default[F[_]: Sync]: ConnectionListener[F] = new ConnectionListener[F] { + private val logger = Slf4jLogger.getLoggerFromClass[F](this.getClass) + + override def onCreate(connection: Connection): F[Unit] = { logger.info(s"Connection created: $connection (name ${connection.getClientProvidedName})") } - override def onCreateFailure(failure: Throwable): Unit = { - logger.warn(s"Connection NOT created", failure) + override def onCreateFailure(failure: Throwable): F[Unit] = { + logger.warn(failure)(s"Connection NOT created") } - override def onRecoveryStarted(connection: Connection): Unit = { + override def onRecoveryStarted(connection: Connection): F[Unit] = { logger.info(s"Connection recovery started: $connection (name 
${connection.getClientProvidedName})") } - override def onRecoveryCompleted(connection: Connection): Unit = { + override def onRecoveryCompleted(connection: Connection): F[Unit] = { logger.info(s"Connection recovery completed: $connection (name ${connection.getClientProvidedName})") } - override def onRecoveryFailure(connection: Connection, failure: Throwable): Unit = { - logger.warn(s"Connection recovery failed: $connection (name ${connection.getClientProvidedName})", failure) + override def onRecoveryFailure(connection: Connection, failure: Throwable): F[Unit] = { + logger.warn(failure)(s"Connection recovery failed: $connection (name ${connection.getClientProvidedName})") } - override def onShutdown(connection: Connection, cause: ShutdownSignalException): Unit = { + override def onShutdown(connection: Connection, cause: ShutdownSignalException): F[Unit] = { if (cause.isInitiatedByApplication) { logger.debug(s"Connection shutdown: $connection (name ${connection.getClientProvidedName})") } else { - logger.warn(s"Connection shutdown: $connection (name ${connection.getClientProvidedName})", cause) + logger.warn(cause)(s"Connection shutdown: $connection (name ${connection.getClientProvidedName})") } } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerBase.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerBase.scala index 20e05090..421a8f31 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerBase.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerBase.scala @@ -1,105 +1,95 @@ package com.avast.clients.rabbitmq -import cats.effect.{Blocker, ContextShift, Sync} +import cats.effect.{Blocker, Concurrent, ConcurrentEffect, ContextShift, Timer} +import cats.implicits.{catsSyntaxApplicativeError, toFunctorOps} import cats.syntax.flatMap._ -import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer._ -import com.avast.clients.rabbitmq.api.DeliveryResult -import com.avast.metrics.scalaapi.Monitor -import com.rabbitmq.client.AMQP.BasicProperties -import com.typesafe.scalalogging.StrictLogging - -import scala.jdk.CollectionConverters._ -import scala.language.higherKinds -import scala.util.control.NonFatal - -private[rabbitmq] trait ConsumerBase[F[_]] extends StrictLogging { - protected def name: String - protected def queueName: String - protected def channel: ServerChannel - protected def blocker: Blocker - protected def republishStrategy: RepublishStrategy - protected implicit def F: Sync[F] // scalastyle:ignore - protected implicit def cs: ContextShift[F] - protected def connectionInfo: RabbitMQConnectionInfo - protected def monitor: Monitor - - protected val resultsMonitor: Monitor = monitor.named("results") - private val resultAckMeter = resultsMonitor.meter("ack") - private val resultRejectMeter = resultsMonitor.meter("reject") - private val resultRetryMeter = resultsMonitor.meter("retry") - private val resultRepublishMeter = resultsMonitor.meter("republish") - - protected def handleResult(messageId: String, deliveryTag: Long, properties: BasicProperties, routingKey: String, body: Array[Byte])( - res: DeliveryResult): F[Unit] = { - import DeliveryResult._ - - res match { - case Ack => ack(messageId, deliveryTag) - case Reject => reject(messageId, deliveryTag) - case Retry => retry(messageId, deliveryTag) - case Republish(newHeaders) => - republish(messageId, deliveryTag, createPropertiesForRepublish(newHeaders, properties, routingKey), body) - } +import com.avast.bytes.Bytes +import 
com.avast.clients.rabbitmq.JavaConverters.AmqpPropertiesConversions +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor +import com.rabbitmq.client.{AMQP, Envelope} +import org.slf4j.event.Level - } +import scala.concurrent.TimeoutException +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.util._ - protected def ack(messageId: String, deliveryTag: Long): F[Unit] = - blocker.delay { - try { - logger.debug(s"[$name] ACK delivery ID $messageId, deliveryTag $deliveryTag") - if (!channel.isOpen) throw new IllegalStateException("Cannot ack delivery on closed channel") - channel.basicAck(deliveryTag, false) - resultAckMeter.mark() - } catch { - case NonFatal(e) => logger.warn(s"[$name] Error while confirming the delivery", e) - } - } - - protected def reject(messageId: String, deliveryTag: Long): F[Unit] = - blocker.delay { - try { - logger.debug(s"[$name] REJECT delivery ID $messageId, deliveryTag $deliveryTag") - if (!channel.isOpen) throw new IllegalStateException("Cannot reject delivery on closed channel") - channel.basicReject(deliveryTag, false) - resultRejectMeter.mark() - } catch { - case NonFatal(e) => logger.warn(s"[$name] Error while rejecting the delivery", e) - } - } - - protected def retry(messageId: String, deliveryTag: Long): F[Unit] = - blocker.delay { - try { - logger.debug(s"[$name] REJECT (with requeue) delivery ID $messageId, deliveryTag $deliveryTag") - if (!channel.isOpen) throw new IllegalStateException("Cannot retry delivery on closed channel") - channel.basicReject(deliveryTag, true) - resultRetryMeter.mark() - } catch { - case NonFatal(e) => logger.warn(s"[$name] Error while rejecting (with requeue) the delivery", e) - } - } + +// it's a case class so we get the `copy` method for free
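+// A rough sketch of how the pieces are meant to be composed (`MyConsumer` and `userAction` are
+// hypothetical names, not part of this PR; the concrete consumers below do roughly this):
+//   class MyConsumer[F[_], A](base: ConsumerBase[F, A], channelOps: ConsumerChannelOps[F, A]) {
+//     def run(envelope: Envelope, rawBody: Bytes, properties: AMQP.BasicProperties)(userAction: Delivery[A] => F[DeliveryResult]): F[Unit] =
+//       base.parseDelivery(envelope, rawBody, properties).flatMap { d =>
+//         userAction(d.delivery).flatMap(result => channelOps.handleResult(rawBody, d.delivery)(result)(d.context))
+//       }
+//   }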
+final private[rabbitmq] case class ConsumerBase[F[_]: ConcurrentEffect: Timer, A]( + consumerName: String, + queueName: String, + blocker: Blocker, + consumerLogger: ImplicitContextLogger[F], + consumerRootMonitor: Monitor[F])(implicit val contextShift: ContextShift[F], implicit val deliveryConverter: DeliveryConverter[A]) { + + val F: ConcurrentEffect[F] = ConcurrentEffect[F] // scalastyle:ignore + + private val timeoutsMeter = consumerRootMonitor.meter("timeouts") + + def parseDelivery(envelope: Envelope, rawBody: Bytes, properties: AMQP.BasicProperties): F[DeliveryWithContext[A]] = { + implicit val dctx: DeliveryContext = DeliveryContext.from(envelope, properties) + import dctx.fixedProperties + + blocker + .delay(Try(deliveryConverter.convert(rawBody))) + .flatMap[Delivery[A]] { + case Success(Right(a)) => + val delivery = Delivery(a, fixedProperties.asScala, dctx.routingKey.value) - protected def republish(messageId: String, deliveryTag: Long, properties: BasicProperties, body: Array[Byte]): F[Unit] = { - republishStrategy - .republish(blocker, channel, name)(queueName, messageId, deliveryTag, properties, body) - .flatTap(_ => F.delay(resultRepublishMeter.mark())) + consumerLogger.trace(s"[$consumerName] Received delivery from queue '$queueName': ${delivery.copy(body = rawBody)}").as { + delivery + } + + case Success(Left(ce)) => + val delivery = Delivery.MalformedContent(rawBody, fixedProperties.asScala, dctx.routingKey.value, ce) + + consumerLogger.trace(s"[$consumerName] Received delivery from queue '$queueName' but could not convert it: $delivery").as { + delivery + } + + case Failure(ce) => + val ex = ConversionException("Unexpected failure", ce) + val delivery = Delivery.MalformedContent(rawBody, fixedProperties.asScala, dctx.routingKey.value, ex) + + consumerLogger + .trace( + s"[$consumerName] Received delivery from queue '$queueName' but could not convert it as the convertor has failed: $delivery") + .as(delivery) + } + .map(DeliveryWithContext(_, dctx)) } - protected def createPropertiesForRepublish(newHeaders: Map[String, AnyRef], - properties: BasicProperties, - routingKey: String): BasicProperties = { - // values in newHeaders will overwrite values in original headers - // we must also ensure that UserID will be the same as current username (or nothing): https://www.rabbitmq.com/validated-user-id.html - val originalUserId = Option(properties.getUserId).filter(_.nonEmpty) - val h = originalUserId match { - case Some(uid) => newHeaders + (RepublishOriginalRoutingKeyHeaderName -> routingKey) + (RepublishOriginalUserId -> uid) - case None => newHeaders + (RepublishOriginalRoutingKeyHeaderName -> routingKey) - } - val headers = Option(properties.getHeaders).map(_.asScala ++ h).getOrElse(h) - val newUserId = originalUserId match { - case Some(_) => connectionInfo.username.orNull - case None => null - } - properties.builder().headers(headers.asJava).userId(newUserId).build() + def watchForTimeoutIfConfigured(processTimeout: FiniteDuration, timeoutAction: DeliveryResult, timeoutLogLevel: Level)( + delivery: Delivery[A], + result: F[ConfirmedDeliveryResult[F]])( + customTimeoutAction: F[Unit], + )(implicit dctx: DeliveryContext): F[ConfirmedDeliveryResult[F]] = { + import dctx._ + + if (processTimeout != Duration.Zero) { + Concurrent + .timeout(result, processTimeout) + .recoverWith { + case e: TimeoutException => + customTimeoutAction >> + consumerLogger.trace(e)(s"[$consumerName] Timeout for $messageId") >> + timeoutsMeter.mark >> { + + lazy val msg = + s"[$consumerName] Task 
timed-out after $processTimeout of processing delivery $messageId with routing key ${delivery.routingKey}, applying DeliveryResult.$timeoutAction. Delivery was:\n$delivery" + + (timeoutLogLevel match { + case Level.ERROR => consumerLogger.error(msg) + case Level.WARN => consumerLogger.warn(msg) + case Level.INFO => consumerLogger.info(msg) + case Level.DEBUG => consumerLogger.debug(msg) + case Level.TRACE => consumerLogger.trace(msg) + }).as { + ConfirmedDeliveryResult[F](timeoutAction) + } + } + } + } else result } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerChannelOps.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerChannelOps.scala new file mode 100644 index 00000000..321493eb --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerChannelOps.scala @@ -0,0 +1,143 @@ +package com.avast.clients.rabbitmq + +import cats.effect.{Blocker, ConcurrentEffect, ContextShift, Resource, Timer} +import cats.implicits.catsSyntaxApplicativeError +import cats.syntax.flatMap._ +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer._ +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor +import com.rabbitmq.client.AMQP.BasicProperties + +import scala.jdk.CollectionConverters._ +import scala.util._ + +// it's a case class so we get the `copy` method for free +final private[rabbitmq] case class ConsumerChannelOps[F[_]: ConcurrentEffect: Timer: ContextShift, A]( + private val consumerName: String, + private val queueName: String, + channel: ServerChannel, + private val blocker: Blocker, + republishStrategy: RepublishStrategy[F], + poisonedMessageHandler: PoisonedMessageHandler[F, A], + connectionInfo: RabbitMQConnectionInfo, + private val consumerLogger: ImplicitContextLogger[F], + private val consumerRootMonitor: Monitor[F]) { + + val resultsMonitor: Monitor[F] = consumerRootMonitor.named("results") + private val resultAckMeter = resultsMonitor.meter("ack") + private val resultRejectMeter = resultsMonitor.meter("reject") + private val resultRetryMeter = resultsMonitor.meter("retry") + private val resultRepublishMeter = resultsMonitor.meter("republish") + + def handleResult(rawBody: Bytes, delivery: Delivery[A])(res: DeliveryResult)(implicit dctx: DeliveryContext): F[Unit] = { + import DeliveryResult._ + import dctx._ + + poisonedMessageHandler.interceptResult(delivery, messageId, rawBody)(res).flatMap { + case Ack => ack() + case Reject => reject() + case Retry => retry() + case Republish(_, newHeaders) => republish(createPropertiesForRepublish(newHeaders, fixedProperties, routingKey), rawBody) + } + } + + protected def ack()(implicit dctx: DeliveryContext): F[Unit] = { + import dctx._ + + consumerLogger.debug(s"[$consumerName] ACK delivery $messageId, $deliveryTag") >> + blocker + .delay { + if (!channel.isOpen) throw new IllegalStateException("Cannot ack delivery on closed channel") + channel.basicAck(deliveryTag.value, false) + } + .attempt + .flatMap { + case Right(()) => resultAckMeter.mark + case Left(e) => consumerLogger.warn(e)(s"[$consumerName] Error while confirming the delivery $messageId") + } + } + + protected def reject()(implicit dctx: DeliveryContext): F[Unit] = { + import dctx._ + + consumerLogger.debug(s"[$consumerName] REJECT delivery $messageId, $deliveryTag") >> + blocker + .delay { + if (!channel.isOpen) throw new IllegalStateException("Cannot reject delivery on closed channel") +
channel.basicReject(deliveryTag.value, false) + } + .attempt + .flatMap { + case Right(()) => resultRejectMeter.mark + case Left(e) => consumerLogger.warn(e)(s"[$consumerName] Error while rejecting the delivery $messageId") + } + } + + protected def retry()(implicit dctx: DeliveryContext): F[Unit] = { + import dctx._ + + consumerLogger.debug(s"[$consumerName] REJECT (with requeue) delivery $messageId, $deliveryTag") >> + blocker + .delay { + if (!channel.isOpen) throw new IllegalStateException("Cannot retry delivery on closed channel") + channel.basicReject(deliveryTag.value, true) + } + .attempt + .flatMap { + case Right(()) => resultRetryMeter.mark + case Left(e) => consumerLogger.warn(e)(s"[$consumerName] Error while rejecting (with requeue) the delivery $messageId") + } + } + + protected def republish(properties: BasicProperties, rawBody: Bytes)(implicit dctx: DeliveryContext): F[Unit] = { + + republishStrategy.republish(blocker, channel, consumerName)(queueName, properties, rawBody) >> + resultRepublishMeter.mark + } + + protected def createPropertiesForRepublish(newHeaders: Map[String, AnyRef], + properties: BasicProperties, + routingKey: RoutingKey): BasicProperties = { + // values in newHeaders will overwrite values in original headers + // we must also ensure that UserID will be the same as current username (or nothing): https://www.rabbitmq.com/validated-user-id.html + val originalUserId = Option(properties.getUserId).filter(_.nonEmpty) + val h = originalUserId match { + case Some(uid) => newHeaders + (RepublishOriginalRoutingKeyHeaderName -> routingKey.value) + (RepublishOriginalUserId -> uid) + case None => newHeaders + (RepublishOriginalRoutingKeyHeaderName -> routingKey.value) + } + val headers = Option(properties.getHeaders).map(_.asScala ++ h).getOrElse(h) + val newUserId = originalUserId match { + case Some(_) => connectionInfo.username.orNull + case None => null + } + properties.builder().headers(headers.asJava).userId(newUserId).build() + } +} + +class ConsumerChannelOpsFactory[F[_]: ConcurrentEffect: Timer: ContextShift, A: DeliveryConverter]( + consumerName: String, + queueName: String, + blocker: Blocker, + republishStrategy: RepublishStrategy[F], + poisonedMessageHandler: PoisonedMessageHandler[F, A], + connectionInfo: RabbitMQConnectionInfo, + consumerLogger: ImplicitContextLogger[F], + consumerRootMonitor: Monitor[F], + newChannel: Resource[F, ServerChannel]) { + + val create: Resource[F, ConsumerChannelOps[F, A]] = { + newChannel.map { channel => + new ConsumerChannelOps[F, A](consumerName, + queueName, + channel, + blocker, + republishStrategy, + poisonedMessageHandler, + connectionInfo, + consumerLogger, + consumerRootMonitor) + } + } +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerListener.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerListener.scala index e22a82f4..78e2c237 100755 --- a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerListener.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerListener.scala @@ -1,30 +1,32 @@ package com.avast.clients.rabbitmq -import com.rabbitmq.client.{Channel, Consumer, ShutdownSignalException} -import com.typesafe.scalalogging.StrictLogging +import cats.effect.Sync +import com.rabbitmq.client._ +import org.typelevel.log4cats.slf4j.Slf4jLogger -trait ConsumerListener { - def onError(consumer: Consumer, consumerName: String, channel: Channel, failure: Throwable): Unit +trait ConsumerListener[F[_]] { + def onError(consumer: Consumer, consumerName: String, channel: 
Channel, failure: Throwable): F[Unit] - def onShutdown(consumer: Consumer, channel: Channel, consumerName: String, consumerTag: String, sig: ShutdownSignalException): Unit + def onShutdown(consumer: Consumer, channel: Channel, consumerName: String, consumerTag: String, sig: ShutdownSignalException): F[Unit] } object ConsumerListener { - final val Default: ConsumerListener = new ConsumerListener with StrictLogging { + def default[F[_]: Sync]: ConsumerListener[F] = new ConsumerListener[F] { + private val logger = Slf4jLogger.getLoggerFromClass[F](this.getClass) - override def onError(consumer: Consumer, consumerName: String, channel: Channel, failure: Throwable): Unit = { - logger.warn(s"[$consumerName] Error in consumer on channel $channel", failure) + override def onError(consumer: Consumer, consumerName: String, channel: Channel, failure: Throwable): F[Unit] = { + logger.warn(failure)(s"[$consumerName] Error in consumer on channel $channel") } override def onShutdown(consumer: Consumer, channel: Channel, consumerName: String, consumerTag: String, - cause: ShutdownSignalException): Unit = { + cause: ShutdownSignalException): F[Unit] = { if (cause.isInitiatedByApplication) { logger.debug(s"[$consumerName] Shutdown of consumer on channel $channel") } else { - logger.warn(s"[$consumerName] Shutdown of consumer on channel $channel", cause) + logger.warn(cause)(s"[$consumerName] Shutdown of consumer on channel $channel") } } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerWithCallbackBase.scala b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerWithCallbackBase.scala index 4f27be82..bc75b828 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerWithCallbackBase.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/ConsumerWithCallbackBase.scala @@ -1,111 +1,119 @@ package com.avast.clients.rabbitmq -import java.time.{Duration, Instant} -import java.util.concurrent.RejectedExecutionException -import java.util.concurrent.atomic.AtomicInteger - -import cats.effect.{Effect, Sync} +import cats.effect.{ConcurrentEffect, Timer => CatsTimer} import cats.syntax.all._ import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.JavaConverters._ -import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult} -import com.avast.metrics.scalaapi._ +import com.avast.clients.rabbitmq.api.DeliveryResult +import com.avast.metrics.scalaeffectapi._ import com.rabbitmq.client.AMQP.BasicProperties -import com.rabbitmq.client.{DefaultConsumer, ShutdownSignalException} +import com.rabbitmq.client._ -import scala.jdk.CollectionConverters._ -import scala.language.higherKinds +import java.time.{Duration, Instant} +import java.util.concurrent.RejectedExecutionException import scala.util.control.NonFatal -abstract class ConsumerWithCallbackBase[F[_]: Effect](channel: ServerChannel, - failureAction: DeliveryResult, - consumerListener: ConsumerListener) - extends DefaultConsumer(channel) - with ConsumerBase[F] { +abstract class ConsumerWithCallbackBase[F[_]: ConcurrentEffect: CatsTimer, A: DeliveryConverter](base: ConsumerBase[F, A], + channelOps: ConsumerChannelOps[F, A], + failureAction: DeliveryResult, + consumerListener: ConsumerListener[F]) + extends DefaultConsumer(channelOps.channel) { + import base._ + import channelOps._ - override protected implicit val F: Sync[F] = Effect[F] + protected val readMeter: Meter[F] = consumerRootMonitor.meter("read") - protected val readMeter: Meter = monitor.meter("read") + protected val processingFailedMeter: Meter[F] = 
resultsMonitor.meter("processingFailed") - protected val processingFailedMeter: Meter = resultsMonitor.meter("processingFailed") + protected val tasksMonitor: Monitor[F] = consumerRootMonitor.named("tasks") - protected val tasksMonitor: Monitor = monitor.named("tasks") + protected val processingCount: SettableGauge[F, Long] = tasksMonitor.gauge.settableLong("processing", replaceExisting = true) - protected val processingCount: AtomicInteger = new AtomicInteger(0) + protected val processedTimer: TimerPair[F] = tasksMonitor.timerPair("processed") - tasksMonitor.gauge("processing")(() => processingCount.get()) + override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException): Unit = { + consumerListener.onShutdown(this, channel, consumerName, consumerTag, sig).unsafeStartAndForget() + } - protected val processedTimer: TimerPair = tasksMonitor.timerPair("processed") + protected def handleNewDelivery(d: DeliveryWithContext[A]): F[Option[ConfirmedDeliveryResult[F]]] - override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException): Unit = - consumerListener.onShutdown(this, channel, name, consumerTag, sig) + override final def handleDelivery(consumerTag: String, envelope: Envelope, properties: BasicProperties, body: Array[Byte]): Unit = { + val action = processingCount.inc >> { + val rawBody = Bytes.copyFrom(body) - protected def handleDelivery(messageId: String, deliveryTag: Long, properties: BasicProperties, routingKey: String, body: Array[Byte])( - readAction: DeliveryReadAction[F, Bytes]): F[Unit] = - F.delay { - try { - readMeter.mark() + base + .parseDelivery(envelope, rawBody, properties) + .flatMap { d => + import d.context + import context._ - logger.debug(s"[$name] Read delivery with ID $messageId, deliveryTag $deliveryTag") + consumerLogger.debug(s"[$consumerName] Read delivery with $messageId, $deliveryTag") >> + consumerLogger.trace(s"[$consumerName] Received delivery: ${d.delivery}") >> + readMeter.mark >> { - val delivery = Delivery(Bytes.copyFrom(body), properties.asScala, Option(routingKey).getOrElse("")) + val st = Instant.now() + val taskDuration = F.delay(Duration.between(st, Instant.now())) - logger.trace(s"[$name] Received delivery: $delivery") + unsafeExecuteReadAction(d, rawBody, taskDuration) + .recoverWith { + case e: RejectedExecutionException => + consumerLogger.debug(e)(s"[$consumerName] Executor was unable to plan the handling task for $messageId, $deliveryTag") >> + handleFailure(d, rawBody, e) + } + } + } + .recoverWith { + case e => + processingCount.dec >> + processingFailedMeter.mark >> + consumerLogger.plainDebug(e)(s"Could not process delivery with delivery tag ${envelope.getDeliveryTag}") >> + F.raiseError[Unit](e) + } + } + + // Actually start the processing. This is the barrier between the synchronous and the asynchronous world.
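+    // (handleDelivery is invoked on the RabbitMQ Java client's dispatch thread, which must not be
+    // blocked or used to run the user code; the F[_] program built above is therefore only submitted
+    // here, fire-and-forget, and its result is confirmed back to the broker asynchronously via handleResult.)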
+ startAndForget { + action + } + } - val st = Instant.now() + private def unsafeExecuteReadAction(delivery: DeliveryWithContext[A], rawBody: Bytes, taskDuration: F[Duration]): F[Unit] = { + import delivery.context + import context._ - @inline - def taskDuration: Duration = Duration.between(st, Instant.now()) + handleNewDelivery(delivery) + .flatMap { + case Some(dr) => + handleResult(rawBody, delivery.delivery)(dr.deliveryResult) >> dr.confirm - readAction(delivery) - .flatMap { handleResult(messageId, deliveryTag, properties, routingKey, body) } - .flatTap(_ => - F.delay { - val duration = taskDuration - logger.debug(s"[$name] Delivery ID $messageId handling succeeded in $duration") - processedTimer.update(duration) - }) - .recoverWith { - case NonFatal(t) => - F.delay { - val duration = taskDuration - logger.debug(s"[$name] Delivery ID $messageId handling failed in $duration", t) - processedTimer.updateFailure(duration) - logger.error(s"[$name] Error while executing callback for delivery with routing key $routingKey", t) - } >> - handleFailure(messageId, deliveryTag, properties, routingKey, body, t) + case None => + consumerLogger.trace(s"[$consumerName] Delivery result for $messageId ignored") + } + .flatTap { _ => + taskDuration.flatMap { duration => + consumerLogger.debug(s"[$consumerName] Delivery $messageId handling succeeded in $duration") >> + processedTimer.update(duration) >> + processingCount.dec + } + } + .recoverWith { + case NonFatal(t) => + taskDuration.flatMap { duration => + consumerLogger.debug(t)(s"[$consumerName] Delivery $messageId handling failed in $duration") >> + processedTimer.updateFailure(duration) >> + handleFailure(delivery, rawBody, t) } - } catch { - // we catch this specific exception, handling of others is up to Lyra - case e: RejectedExecutionException => - logger.debug(s"[$name] Executor was unable to plan the handling task", e) - handleFailure(messageId, deliveryTag, properties, routingKey, body, e) - - case NonFatal(e) => - logger.error(s"[$name] Error while preparing callback execution for delivery with routing key $routingKey. 
This is probably a bug as the F construction shouldn't throw any exception", e) - handleFailure(messageId, deliveryTag, properties, routingKey, body, e) } - }.flatten - - private def handleFailure(messageId: String, - deliveryTag: Long, - properties: BasicProperties, - routingKey: String, - body: Array[Byte], - t: Throwable): F[Unit] = { - F.delay { - processingCount.decrementAndGet() - processingFailedMeter.mark() - consumerListener.onError(this, name, channel, t) - } >> - executeFailureAction(messageId, deliveryTag, properties, routingKey, body) } - private def executeFailureAction(messageId: String, - deliveryTag: Long, - properties: BasicProperties, - routingKey: String, - body: Array[Byte]): F[Unit] = { - handleResult(messageId, deliveryTag, properties, routingKey, body)(failureAction) + private def handleFailure(delivery: DeliveryWithContext[A], rawBody: Bytes, t: Throwable)(implicit dctx: DeliveryContext): F[Unit] = { + processingFailedMeter.mark >> + processingCount.dec >> + consumerListener.onError(this, consumerName, channel, t) >> + executeFailureAction(delivery, rawBody) + } + + private def executeFailureAction(d: DeliveryWithContext[A], rawBody: Bytes): F[Unit] = { + import d._ + handleResult(rawBody, delivery)(failureAction) } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQClientFactory.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQClientFactory.scala index 077299ef..f1eb8b31 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQClientFactory.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQClientFactory.scala @@ -1,312 +1,303 @@ package com.avast.clients.rabbitmq -import java.util.concurrent.TimeoutException import cats.effect._ -import cats.syntax.all._ +import cats.implicits.{catsSyntaxFlatMapOps, toFunctorOps, toTraverseOps} import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.{Delivery, _} -import com.avast.metrics.scalaapi.{Meter, Monitor} -import com.rabbitmq.client.AMQP.Queue -import com.rabbitmq.client.{AMQP, Consumer} -import com.typesafe.scalalogging.LazyLogging -import org.slf4j.event.Level +import com.avast.clients.rabbitmq.DefaultRabbitMQClientFactory.startConsumingQueue +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor +import com.rabbitmq.client.Consumer import scala.collection.compat._ import scala.collection.immutable -import scala.concurrent.duration.Duration import scala.jdk.CollectionConverters._ -import scala.language.{higherKinds, implicitConversions} -import scala.util.control.NonFatal +import scala.language.implicitConversions -private[rabbitmq] object DefaultRabbitMQClientFactory extends LazyLogging { +private[rabbitmq] class DefaultRabbitMQClientFactory[F[_]: ConcurrentEffect: Timer: ContextShift]( + connection: RabbitMQConnection[F], + connectionInfo: RabbitMQConnectionInfo, + blocker: Blocker, + republishStrategy: RepublishStrategyConfig) { - private type ArgumentsMap = Map[String, Any] + private val F: ConcurrentEffect[F] = implicitly - private type DefaultDeliveryReadAction[F[_]] = DeliveryReadAction[F, Bytes] + private type ArgumentsMap = Map[String, Any] object Producer { - def create[F[_]: ConcurrentEffect, A: ProductConverter]( - producerConfig: ProducerConfig, - channel: ServerChannel, - factoryInfo: RabbitMQConnectionInfo, - blocker: Blocker, - monitor: Monitor)(implicit cs: ContextShift[F]): DefaultRabbitMQProducer[F, A] = 
{ - prepareProducer[F, A](producerConfig, channel, factoryInfo, blocker, monitor) + def create[A: ProductConverter](producerConfig: ProducerConfig, monitor: Monitor[F]): Resource[F, DefaultRabbitMQProducer[F, A]] = { + prepareProducer[A](producerConfig, connection, monitor) } - } object Consumer { - def create[F[_]: ConcurrentEffect, A: DeliveryConverter]( + def create[A: DeliveryConverter]( consumerConfig: ConsumerConfig, - channel: ServerChannel, - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - blocker: Blocker, - monitor: Monitor, - consumerListener: ConsumerListener, - readAction: DeliveryReadAction[F, A])(implicit timer: Timer[F], cs: ContextShift[F]): DefaultRabbitMQConsumer[F] = { - - prepareConsumer(consumerConfig, readAction, connectionInfo, republishStrategy, channel, consumerListener, blocker, monitor) + consumerListener: ConsumerListener[F], + readAction: DeliveryReadAction[F, A], + monitor: Monitor[F] + ): Resource[F, DefaultRabbitMQConsumer[F, A]] = { + prepareConsumer[A](consumerConfig, consumerListener, readAction, monitor) } } object PullConsumer { - def create[F[_]: ConcurrentEffect, A: DeliveryConverter]( - consumerConfig: PullConsumerConfig, - channel: ServerChannel, - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - blocker: Blocker, - monitor: Monitor)(implicit cs: ContextShift[F]): DefaultRabbitMQPullConsumer[F, A] = { + def create[A: DeliveryConverter](consumerConfig: PullConsumerConfig, + monitor: Monitor[F]): Resource[F, DefaultRabbitMQPullConsumer[F, A]] = { - preparePullConsumer(consumerConfig, connectionInfo, republishStrategy, channel, blocker, monitor) + preparePullConsumer(consumerConfig, monitor) } } object StreamingConsumer { - def create[F[_]: ConcurrentEffect, A: DeliveryConverter](consumerConfig: StreamingConsumerConfig, - channel: ServerChannel, - newChannel: F[ServerChannel], - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - blocker: Blocker, - monitor: Monitor, - consumerListener: ConsumerListener)( - implicit timer: Timer[F], - cs: ContextShift[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { - - prepareStreamingConsumer(consumerConfig, connectionInfo, republishStrategy, channel, newChannel, consumerListener, blocker, monitor) + def create[A: DeliveryConverter](consumerConfig: StreamingConsumerConfig, + monitor: Monitor[F], + consumerListener: ConsumerListener[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { + + prepareStreamingConsumer(consumerConfig, consumerListener, monitor) } } object Declarations { - def declareExchange[F[_]: Sync](config: DeclareExchangeConfig, - channel: ServerChannel, - connectionInfo: RabbitMQConnectionInfo): F[Unit] = - Sync[F].delay { - import config._ - - DefaultRabbitMQClientFactory.this.declareExchange(name, `type`, durable, autoDelete, arguments, channel, connectionInfo) - } - - def declareQueue[F[_]: Sync](config: DeclareQueueConfig, channel: ServerChannel, connectionInfo: RabbitMQConnectionInfo): F[Unit] = - Sync[F].delay { - import config._ - - DefaultRabbitMQClientFactory.this.declareQueue(channel, name, durable, exclusive, autoDelete, arguments) - () - } - - def bindQueue[F[_]: Sync](config: BindQueueConfig, channel: ServerChannel, connectionInfo: RabbitMQConnectionInfo): F[Unit] = - Sync[F].delay { - import config._ - - DefaultRabbitMQClientFactory.bindQueue(channel, queueName, exchangeName, routingKeys, arguments, connectionInfo) - } + private val logger = 
ImplicitContextLogger.createLogger[F, DefaultRabbitMQClientFactory[F]] - def bindExchange[F[_]: Sync](config: BindExchangeConfig, channel: ServerChannel, connectionInfo: RabbitMQConnectionInfo): F[Unit] = - Sync[F].delay { - import config._ + def declareExchange(config: DeclareExchangeConfig, channel: ServerChannel): F[Unit] = { + import config._ - routingKeys.foreach { - DefaultRabbitMQClientFactory.this - .bindExchange(connectionInfo)(channel, sourceExchangeName, destExchangeName, arguments.value) - } - } - } - - private def prepareStreamingConsumer[F[_]: ConcurrentEffect, A: DeliveryConverter]( - consumerConfig: StreamingConsumerConfig, - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - channel: ServerChannel, - newChannel: F[ServerChannel], - consumerListener: ConsumerListener, - blocker: Blocker, - monitor: Monitor)(implicit timer: Timer[F], cs: ContextShift[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { - import consumerConfig._ - - val timeoutsMeter = monitor.meter("timeouts") - - // auto declare exchanges - declareExchangesFromBindings(connectionInfo, channel, consumerConfig.bindings) - - // auto declare queue; if configured - consumerConfig.declare.foreach { - declareQueue(consumerConfig.queueName, connectionInfo, channel, _) + DefaultRabbitMQClientFactory.this.declareExchange(name, `type`, durable, autoDelete, arguments, channel)(logger) } - // auto bind - bindQueue(connectionInfo, channel, consumerConfig.queueName, consumerConfig.bindings) - - bindQueueForRepublishing(connectionInfo, channel, consumerConfig.queueName, republishStrategy) - - val timeoutAction = (d: Delivery[Bytes], e: TimeoutException) => doTimeoutAction(name, d, consumerConfig.timeoutAction, timeoutLogLevel, timeoutsMeter, e) - - DefaultRabbitMQStreamingConsumer.make( - name, - newChannel.flatTap(ch => Sync[F].delay(ch.basicQos(consumerConfig.prefetchCount))), - consumerTag, - queueName, - connectionInfo, - consumerListener, - queueBufferSize, - monitor, - republishStrategy.toRepublishStrategy, - processTimeout, - timeoutAction, - blocker - ) - } - - private def prepareConsumer[F[_]: ConcurrentEffect, A: DeliveryConverter]( - consumerConfig: ConsumerConfig, - readAction: DeliveryReadAction[F, A], - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - channel: ServerChannel, - consumerListener: ConsumerListener, - blocker: Blocker, - monitor: Monitor)(implicit timer: Timer[F], cs: ContextShift[F]): DefaultRabbitMQConsumer[F] = { - - // auto declare exchanges - declareExchangesFromBindings(connectionInfo, channel, consumerConfig.bindings) + def declareQueue(config: DeclareQueueConfig, channel: ServerChannel): F[Unit] = { + import config._ - // auto declare queue; if configured - consumerConfig.declare.foreach { - declareQueue(consumerConfig.queueName, connectionInfo, channel, _) + DefaultRabbitMQClientFactory.this.declareQueue(channel, name, durable, exclusive, autoDelete, arguments)(logger) } - // set prefetch size (per consumer) - channel.basicQos(consumerConfig.prefetchCount) + def bindQueue(config: BindQueueConfig, channel: ServerChannel): F[Unit] = { + import config._ - // auto bind - bindQueue(connectionInfo, channel, consumerConfig.queueName, consumerConfig.bindings) + DefaultRabbitMQClientFactory.this.bindQueue(channel, queueName, exchangeName, routingKeys, arguments)(logger) + } - bindQueueForRepublishing(connectionInfo, channel, consumerConfig.queueName, republishStrategy) + def bindExchange(config: 
BindExchangeConfig, channel: ServerChannel): F[Unit] = { + import config._ - prepareConsumer(consumerConfig, connectionInfo, republishStrategy, channel, readAction, consumerListener, blocker, monitor) + routingKeys + .map { + DefaultRabbitMQClientFactory.this.bindExchange(channel, sourceExchangeName, destExchangeName, arguments.value)(_)(logger) + } + .sequence + .as(()) + } } - private def prepareConsumer[F[_]: ConcurrentEffect, A: DeliveryConverter]( - consumerConfig: ConsumerConfig, - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - channel: ServerChannel, - userReadAction: DeliveryReadAction[F, A], - consumerListener: ConsumerListener, - blocker: Blocker, - monitor: Monitor)(implicit timer: Timer[F], cs: ContextShift[F]): DefaultRabbitMQConsumer[F] = { + // scalastyle:off method.length + private def prepareStreamingConsumer[A: DeliveryConverter](consumerConfig: StreamingConsumerConfig, + consumerListener: ConsumerListener[F], + monitor: Monitor[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { import consumerConfig._ - val readAction: DefaultDeliveryReadAction[F] = { - val convAction: DefaultDeliveryReadAction[F] = { d: Delivery[Bytes] => - try { - userReadAction(convertDelivery(d)) - } catch { - case NonFatal(e) => - ConcurrentEffect[F].raiseError(e) + val logger = ImplicitContextLogger.createLogger[F, DefaultRabbitMQStreamingConsumer[F, A]] + + Resource.eval(connection.withChannel { channel => + // auto declare exchanges + declareExchangesFromBindings(channel, consumerConfig.bindings)(logger) >> + // auto declare queue; if configured + consumerConfig.declare.map { declareQueue(consumerConfig.queueName, channel, _)(logger) }.getOrElse(F.unit) >> + // auto bind + bindQueue(channel, consumerConfig.queueName, consumerConfig.bindings)(logger) >> + bindQueueForRepublishing(channel, consumerConfig.queueName, republishStrategy)(logger) + }) >> + PoisonedMessageHandler + .make[F, A](consumerConfig.poisonedMessageHandling, connection, monitor.named("poisonedMessageHandler")) + .flatMap { pmh => + val base = new ConsumerBase[F, A]( + name, + queueName, + blocker, + ImplicitContextLogger.createLogger[F, DefaultRabbitMQStreamingConsumer[F, A]], + monitor + ) + + val channelOpsFactory = new ConsumerChannelOpsFactory[F, A]( + name, + queueName, + blocker, + republishStrategy.toRepublishStrategy[F], + pmh, + connectionInfo, + ImplicitContextLogger.createLogger[F, DefaultRabbitMQStreamingConsumer[F, A]], + monitor, + connection.newChannel().evalTap(ch => Sync[F].delay(ch.basicQos(consumerConfig.prefetchCount))) + ) + + DefaultRabbitMQStreamingConsumer.make( + base, + channelOpsFactory, + consumerTag, + consumerListener, + queueBufferSize, + processTimeout, + timeoutAction, + timeoutLogLevel, + ) } - } - - wrapReadAction(consumerConfig, convAction, monitor, blocker) - } - - val consumer = { - new DefaultRabbitMQConsumer(name, - channel, - queueName, - connectionInfo, - monitor, - failureAction, - consumerListener, - republishStrategy.toRepublishStrategy, - blocker)(readAction) - } - - startConsumingQueue(channel, queueName, consumerTag, consumer) - - consumer } + // scalastyle:on method.length - private def preparePullConsumer[F[_]: ConcurrentEffect, A: DeliveryConverter]( - consumerConfig: PullConsumerConfig, - connectionInfo: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - channel: ServerChannel, - blocker: Blocker, - monitor: Monitor)(implicit cs: ContextShift[F]): DefaultRabbitMQPullConsumer[F, A] = { - + // scalastyle:off 
method.length + private def prepareConsumer[A: DeliveryConverter](consumerConfig: ConsumerConfig, + consumerListener: ConsumerListener[F], + readAction: DeliveryReadAction[F, A], + monitor: Monitor[F]): Resource[F, DefaultRabbitMQConsumer[F, A]] = { import consumerConfig._ - // auto declare exchanges - declareExchangesFromBindings(connectionInfo, channel, consumerConfig.bindings) - - // auto declare queue; if configured - declare.foreach { - declareQueue(consumerConfig.queueName, connectionInfo, channel, _) - } - - // auto bind - bindQueue(connectionInfo, channel, consumerConfig.queueName, consumerConfig.bindings) + val logger = ImplicitContextLogger.createLogger[F, DefaultRabbitMQConsumer[F, A]] + + connection + .newChannel() + .evalTap { channel => + // auto declare exchanges + declareExchangesFromBindings(channel, consumerConfig.bindings)(logger) >> + // auto declare queue; if configured + consumerConfig.declare.map { declareQueue(consumerConfig.queueName, channel, _)(logger) }.getOrElse(F.unit) >> + // set prefetch size (per consumer) + blocker.delay { channel.basicQos(consumerConfig.prefetchCount) } >> + // auto bind + bindQueue(channel, consumerConfig.queueName, consumerConfig.bindings)(logger) >> + bindQueueForRepublishing(channel, consumerConfig.queueName, republishStrategy)(logger) + } + .flatMap { channel => + PoisonedMessageHandler + .make[F, A](consumerConfig.poisonedMessageHandling, connection, monitor.named("poisonedMessageHandler")) + .map { pmh => + val base = new ConsumerBase[F, A]( + name, + queueName, + blocker, + logger, + monitor + ) + + val channelOps = new ConsumerChannelOps[F, A]( + name, + queueName, + channel, + blocker, + republishStrategy.toRepublishStrategy[F], + pmh, + connectionInfo, + ImplicitContextLogger.createLogger[F, DefaultRabbitMQStreamingConsumer[F, A]], + monitor + ) + + new DefaultRabbitMQConsumer[F, A]( + base, + channelOps, + processTimeout, + timeoutAction, + timeoutLogLevel, + failureAction, + consumerListener + )(readAction) + } + .evalTap { consumer => + startConsumingQueue(channel, queueName, consumerTag, consumer, blocker) + } + } + } + // scalastyle:on method.length - bindQueueForRepublishing(connectionInfo, channel, consumerConfig.queueName, republishStrategy) + private def preparePullConsumer[A: DeliveryConverter](consumerConfig: PullConsumerConfig, + monitor: Monitor[F]): Resource[F, DefaultRabbitMQPullConsumer[F, A]] = { + import consumerConfig._ - new DefaultRabbitMQPullConsumer[F, A](name, - channel, - queueName, - connectionInfo, - failureAction, - monitor, - republishStrategy.toRepublishStrategy, - blocker) + val logger = ImplicitContextLogger.createLogger[F, DefaultRabbitMQPullConsumer[F, A]] + + connection + .newChannel() + .evalTap { channel => + // auto declare exchanges + declareExchangesFromBindings(channel, consumerConfig.bindings)(logger) >> + // auto declare queue; if configured + declare.map { declareQueue(consumerConfig.queueName, channel, _)(logger) }.getOrElse(F.unit) >> + // auto bind + bindQueue(channel, consumerConfig.queueName, consumerConfig.bindings)(logger) >> + bindQueueForRepublishing(channel, consumerConfig.queueName, republishStrategy)(logger) + } + .flatMap { channel => + PoisonedMessageHandler + .make[F, A](consumerConfig.poisonedMessageHandling, connection, monitor.named("poisonedMessageHandler")) + .map { pmh => + val base = new ConsumerBase[F, A]( + name, + queueName, + blocker, + logger, + monitor + ) + + val channelOps = new ConsumerChannelOps[F, A]( + name, + queueName, + channel, + blocker, + 
republishStrategy.toRepublishStrategy[F], + pmh, + connectionInfo, + ImplicitContextLogger.createLogger[F, DefaultRabbitMQStreamingConsumer[F, A]], + monitor + ) + + new DefaultRabbitMQPullConsumer[F, A](base, channelOps) + } + } } - private def prepareProducer[F[_]: ConcurrentEffect, A: ProductConverter]( - producerConfig: ProducerConfig, - channel: ServerChannel, - connectionInfo: RabbitMQConnectionInfo, - blocker: Blocker, - monitor: Monitor)(implicit cs: ContextShift[F]): DefaultRabbitMQProducer[F, A] = { - - val defaultProperties = MessageProperties( - deliveryMode = DeliveryMode.fromCode(producerConfig.properties.deliveryMode), - contentType = producerConfig.properties.contentType, - contentEncoding = producerConfig.properties.contentEncoding, - priority = producerConfig.properties.priority.map(Integer.valueOf) - ) - - // auto declare exchange; if configured - producerConfig.declare.foreach { - declareExchange(producerConfig.exchange, connectionInfo, channel, _) - } + private def prepareProducer[A: ProductConverter](producerConfig: ProducerConfig, + connection: RabbitMQConnection[F], + monitor: Monitor[F]): Resource[F, DefaultRabbitMQProducer[F, A]] = { + val logger = ImplicitContextLogger.createLogger[F, DefaultRabbitMQProducer[F, A]] - new DefaultRabbitMQProducer[F, A]( - producerConfig.name, - producerConfig.exchange, - channel, - defaultProperties, - producerConfig.reportUnroutable, - blocker, - monitor - ) + connection + .newChannel() + .evalTap { channel => + // auto declare exchange; if configured + producerConfig.declare.map { declareExchange(producerConfig.exchange, channel, _)(logger) }.getOrElse(F.unit) + } + .map { channel => + val defaultProperties = MessageProperties( + deliveryMode = DeliveryMode.fromCode(producerConfig.properties.deliveryMode), + contentType = producerConfig.properties.contentType, + contentEncoding = producerConfig.properties.contentEncoding, + priority = producerConfig.properties.priority.map(Integer.valueOf) + ) + + new DefaultRabbitMQProducer[F, A]( + producerConfig.name, + producerConfig.exchange, + channel, + defaultProperties, + producerConfig.reportUnroutable, + blocker, + logger, + monitor + ) + } } - private[rabbitmq] def declareExchange(name: String, - connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel, - autoDeclareExchange: AutoDeclareExchangeConfig): Unit = { + private[rabbitmq] def declareExchange(name: String, channel: ServerChannel, autoDeclareExchange: AutoDeclareExchangeConfig, + )(logger: ImplicitContextLogger[F]): F[Unit] = { import autoDeclareExchange._ if (enabled) { - declareExchange(name, `type`, durable, autoDelete, arguments, channel, connectionInfo) - } - () + declareExchange(name, `type`, durable, autoDelete, arguments, channel)(logger) + } else F.unit + } private[rabbitmq] def declareExchange(name: String, @@ -315,23 +306,29 @@ private[rabbitmq] object DefaultRabbitMQClientFactory extends LazyLogging { autoDelete: Boolean, arguments: DeclareArgumentsConfig, channel: ServerChannel, - connectionInfo: RabbitMQConnectionInfo): Unit = { - logger.info(s"Declaring exchange '$name' of type ${`type`} in virtual host '${connectionInfo.virtualHost}'") - val javaArguments = argsAsJava(arguments.value) - channel.exchangeDeclare(name, `type`.value, durable, autoDelete, javaArguments) - () + )(logger: ImplicitContextLogger[F]): F[Unit] = { + logger.plainInfo(s"Declaring exchange '$name' of type ${`type`} in virtual host '${connectionInfo.virtualHost}'") >> + blocker.delay { + val javaArguments = argsAsJava(arguments.value) 
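+        // exchangeDeclare blocks until the broker replies with Declare-Ok, hence it runs via the blocker;
+        // re-declaring an existing exchange with the same parameters is accepted by the broker, so this is effectively idempotent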
+ channel.exchangeDeclare(name, `type`.value, durable, autoDelete, javaArguments) + () + } } - private def bindQueue(connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel, - queueName: String, - bindings: immutable.Seq[AutoBindQueueConfig]): Unit = { - bindings.foreach { bind => - import bind._ - val exchangeName = bind.exchange.name - - bindQueue(channel, queueName, exchangeName, routingKeys, bindArguments, connectionInfo) - } + private def bindQueue( + channel: ServerChannel, + queueName: String, + bindings: immutable.Seq[AutoBindQueueConfig], + )(logger: ImplicitContextLogger[F]): F[Unit] = { + bindings + .map { bind => + import bind._ + val exchangeName = bind.exchange.name + + bindQueue(channel, queueName, exchangeName, routingKeys, bindArguments)(logger) + } + .sequence + .as(()) } private def bindQueue(channel: ServerChannel, @@ -339,50 +336,41 @@ private[rabbitmq] object DefaultRabbitMQClientFactory extends LazyLogging { exchangeName: String, routingKeys: immutable.Seq[String], bindArguments: BindArgumentsConfig, - connectionInfo: RabbitMQConnectionInfo): Unit = { + )(logger: ImplicitContextLogger[F]): F[Unit] = { if (routingKeys.nonEmpty) { - routingKeys.foreach { routingKey => - bindQueue(connectionInfo)(channel, queueName)(exchangeName, routingKey, bindArguments.value) - } + routingKeys + .map { bindQueue(channel, queueName)(exchangeName, _, bindArguments.value)(logger) } + .sequence + .as(()) } else { // binding without routing key, possibly to fanout exchange - bindQueue(connectionInfo)(channel, queueName)(exchangeName, "", bindArguments.value) + bindQueue(channel, queueName)(exchangeName, "", bindArguments.value)(logger) } } - private[rabbitmq] def startConsumingQueue(channel: ServerChannel, queueName: String, consumerTag: String, consumer: Consumer): String = { - channel.setDefaultConsumer(consumer) // see `setDefaultConsumer` javadoc; this is possible because the channel is here exclusively for this consumer - val finalConsumerTag = channel.basicConsume(queueName, false, if (consumerTag == "Default") "" else consumerTag, consumer) - finalConsumerTag - } - - private def bindQueueForRepublishing[F[_]](connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel, - queueName: String, - strategyConfig: RepublishStrategyConfig): Unit = { + private def bindQueueForRepublishing( + channel: ServerChannel, + queueName: String, + strategyConfig: RepublishStrategyConfig, + )(logger: ImplicitContextLogger[F]): F[Unit] = { import RepublishStrategyConfig._ strategyConfig match { - case CustomExchange(exchangeName, _, exchangeAutoBind) => - if (exchangeAutoBind) { - bindQueue(connectionInfo)(channel, queueName)(exchangeName, queueName, Map.empty) - } + case CustomExchange(exchangeName, _, exchangeAutoBind) if exchangeAutoBind => + bindQueue(channel, queueName)(exchangeName, queueName, Map.empty)(logger) - case _ => () // no-op + case _ => F.unit // no-op } } - private def declareQueue(queueName: String, - connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel, - declare: AutoDeclareQueueConfig): Unit = { + private def declareQueue(queueName: String, channel: ServerChannel, declare: AutoDeclareQueueConfig, + )(logger: ImplicitContextLogger[F]): F[Unit] = { import declare._ if (enabled) { - logger.info(s"Declaring queue '$queueName' in virtual host '${connectionInfo.virtualHost}'") - declareQueue(channel, queueName, durable, exclusive, autoDelete, arguments) - } + declareQueue(channel, queueName, durable, exclusive, autoDelete, arguments)(logger) + } else F.unit } 
private[rabbitmq] def declareQueue(channel: ServerChannel, @@ -390,44 +378,54 @@ private[rabbitmq] object DefaultRabbitMQClientFactory extends LazyLogging { durable: Boolean, exclusive: Boolean, autoDelete: Boolean, - arguments: DeclareArgumentsConfig): Queue.DeclareOk = { - channel.queueDeclare(queueName, durable, exclusive, autoDelete, arguments.value) + arguments: DeclareArgumentsConfig, + )(logger: ImplicitContextLogger[F]): F[Unit] = { + logger.plainInfo(s"Declaring queue '$queueName' in virtual host '${connectionInfo.virtualHost}'") >> + blocker.delay { + channel.queueDeclare(queueName, durable, exclusive, autoDelete, arguments.value) + () + } } - private[rabbitmq] def bindQueue(connectionInfo: RabbitMQConnectionInfo)( + private[rabbitmq] def bindQueue( channel: ServerChannel, - queueName: String)(exchangeName: String, routingKey: String, arguments: ArgumentsMap): AMQP.Queue.BindOk = { - logger.info(s"Binding exchange $exchangeName($routingKey) -> queue '$queueName' in virtual host '${connectionInfo.virtualHost}'") - - channel.queueBind(queueName, exchangeName, routingKey, arguments) + queueName: String)(exchangeName: String, routingKey: String, arguments: ArgumentsMap)(logger: ImplicitContextLogger[F]): F[Unit] = { + logger.plainInfo(s"Binding exchange $exchangeName($routingKey) -> queue '$queueName' in virtual host '${connectionInfo.virtualHost}'") >> + blocker.delay { + channel.queueBind(queueName, exchangeName, routingKey, arguments) + () + } } - private[rabbitmq] def bindExchange(connectionInfo: RabbitMQConnectionInfo)( - channel: ServerChannel, - sourceExchangeName: String, - destExchangeName: String, - arguments: ArgumentsMap)(routingKey: String): AMQP.Exchange.BindOk = { - logger.info( + private[rabbitmq] def bindExchange(channel: ServerChannel, sourceExchangeName: String, destExchangeName: String, arguments: ArgumentsMap)( + routingKey: String)(logger: ImplicitContextLogger[F]): F[Unit] = { + logger.plainInfo( s"Binding exchange $sourceExchangeName($routingKey) -> exchange '$destExchangeName' in virtual host '${connectionInfo.virtualHost}'" - ) - - channel.exchangeBind(destExchangeName, sourceExchangeName, routingKey, arguments) + ) >> blocker.delay { + channel.exchangeBind(destExchangeName, sourceExchangeName, routingKey, arguments) + () + } } - private def declareExchangesFromBindings(connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel, - bindings: Seq[AutoBindQueueConfig]): Unit = { - bindings.foreach { bind => - import bind.exchange._ + private def declareExchangesFromBindings(channel: ServerChannel, bindings: Seq[AutoBindQueueConfig])( + logger: ImplicitContextLogger[F]): F[Unit] = { + bindings + .map { bind => + import bind.exchange._ - // auto declare exchange; if configured - declare.foreach { - declareExchange(name, connectionInfo, channel, _) + // auto declare exchange; if configured + declare + .map { + declareExchange(name, channel, _)(logger) + } + .getOrElse(F.unit) } - } + .toList + .sequence + .as(()) } - private[rabbitmq] def convertDelivery[F[_]: ConcurrentEffect, A: DeliveryConverter](d: Delivery[Bytes]): Delivery[A] = { + private[rabbitmq] def convertDelivery[A: DeliveryConverter](d: Delivery[Bytes]): Delivery[A] = { d.flatMap { d => implicitly[DeliveryConverter[A]].convert(d.body) match { case Right(a) => d.mapBody(_ => a) @@ -436,70 +434,21 @@ private[rabbitmq] object DefaultRabbitMQClientFactory extends LazyLogging { } } - private def wrapReadAction[F[_]: ConcurrentEffect, A]( - consumerConfig: ConsumerConfig, - userReadAction: 
DefaultDeliveryReadAction[F], - consumerMonitor: Monitor, - blocker: Blocker)(implicit timer: Timer[F], cs: ContextShift[F]): DefaultDeliveryReadAction[F] = { - import consumerConfig._ - - val timeoutsMeter = consumerMonitor.meter("timeouts") - val fatalFailuresMeter = consumerMonitor.meter("fatalFailures") - - delivery: Delivery[Bytes] => - try { - // we try to catch also long-lasting synchronous work on the thread - val action: F[DeliveryResult] = blocker.delay { userReadAction(delivery) }.flatten - - val timedOutAction: F[DeliveryResult] = { - if (processTimeout != Duration.Zero) { - Concurrent - .timeout(action, processTimeout) - .recoverWith { - case e: TimeoutException => doTimeoutAction(name, delivery, timeoutAction, timeoutLogLevel, timeoutsMeter, e) - } - } else action - } - - timedOutAction - .recoverWith { - case NonFatal(e) => - fatalFailuresMeter.mark() - logger.warn(s"[$name] Error while executing callback for delivery with routing key ${delivery.routingKey}, applying DeliveryResult.${consumerConfig.failureAction}${System.lineSeparator()}$delivery", e) - ConcurrentEffect[F].pure(consumerConfig.failureAction) - } - } catch { - case NonFatal(e) => - fatalFailuresMeter.mark() - logger.error(s"[$name] Error while executing callback for delivery with routing key ${delivery.routingKey}, applying DeliveryResult.${consumerConfig.failureAction}${System.lineSeparator()}$delivery", e) - ConcurrentEffect[F].pure(consumerConfig.failureAction) - } + private implicit def argsAsJava(value: ArgumentsMap): java.util.Map[String, Object] = { + value.view.mapValues(_.asInstanceOf[Object]).toMap.asJava } - private def doTimeoutAction[A, F[_]: ConcurrentEffect](consumerName: String, - delivery: Delivery[Bytes], - timeoutAction: DeliveryResult, - timeoutLogLevel: Level, - timeoutsMeter: Meter, - e: TimeoutException): F[DeliveryResult] = Sync[F].delay { - - timeoutsMeter.mark() - - lazy val msg = s"[$consumerName] Task timed-out when processing delivery with routing key ${delivery.routingKey}, applying DeliveryResult.$timeoutAction${System.lineSeparator()}$delivery" +} - timeoutLogLevel match { - case Level.ERROR => logger.error(msg, e) - case Level.WARN => logger.warn(msg, e) - case Level.INFO => logger.info(msg, e) - case Level.DEBUG => logger.debug(msg, e) - case Level.TRACE => logger.trace(msg, e) +object DefaultRabbitMQClientFactory { + private[rabbitmq] def startConsumingQueue[F[_]: Sync: ContextShift](channel: ServerChannel, + queueName: String, + consumerTag: String, + consumer: Consumer, + blocker: Blocker): F[Unit] = { + blocker.delay { + channel.setDefaultConsumer(consumer) // see `setDefaultConsumer` javadoc; this is possible because the channel is here exclusively for this consumer + channel.basicConsume(queueName, false, if (consumerTag == "Default") "" else consumerTag, consumer) } - - timeoutAction - } - - private implicit def argsAsJava(value: ArgumentsMap): java.util.Map[String, Object] = { - value.view.mapValues(_.asInstanceOf[Object]).toMap.asJava } - } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala index dd05f8de..548d54df 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala @@ -3,22 +3,24 @@ package com.avast.clients.rabbitmq import cats.effect._ import cats.syntax.flatMap._ import com.avast.clients.rabbitmq.api._ -import 
com.avast.metrics.scalaapi.Monitor +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor import com.rabbitmq.client.ShutdownSignalException -import com.typesafe.scalalogging.StrictLogging -import scala.language.higherKinds import scala.util.control.NonFatal class DefaultRabbitMQConnection[F[_]] private (connection: ServerConnection, info: RabbitMQConnectionInfo, republishStrategy: RepublishStrategyConfig, - override val connectionListener: ConnectionListener, - override val channelListener: ChannelListener, - override val consumerListener: ConsumerListener, + override val connectionListener: ConnectionListener[F], + override val channelListener: ChannelListener[F], + override val consumerListener: ConsumerListener[F], blocker: Blocker)(implicit F: ConcurrentEffect[F], timer: Timer[F], cs: ContextShift[F]) - extends RabbitMQConnection[F] - with StrictLogging { + extends RabbitMQConnection[F] { + + private val logger = ImplicitContextLogger.createLogger[F, DefaultRabbitMQConnection[F]] + + private val factory = new DefaultRabbitMQClientFactory[F](this, info, blocker, republishStrategy) def newChannel(): Resource[F, ServerChannel] = { createChannel() @@ -26,123 +28,111 @@ class DefaultRabbitMQConnection[F[_]] private (connection: ServerConnection, private val createChannelF: F[ServerChannel] = { F.delay { - try { - connection.createChannel() match { - case channel: ServerChannel => - logger.debug(s"Created channel: $channel ${channel.hashCode()}") - channel.addShutdownListener((cause: ShutdownSignalException) => channelListener.onShutdown(cause, channel)) - channelListener.onCreate(channel) - channel - - // since the connection is `Recoverable`, the channel should always be `Recoverable` too (based on docs), so the exception will never be thrown - case _ => throw new IllegalStateException(s"Required Recoverable Channel") + try { + connection.createChannel() match { + case channel: ServerChannel => + channel.addShutdownListener { (cause: ShutdownSignalException) => + channelListener.onShutdown(cause, channel).unsafeStartAndForget() + } + channelListener.onCreate(channel).unsafeStartAndForget() + channel + + // since the connection is `Recoverable`, the channel should always be `Recoverable` too (based on docs), so the exception will never be thrown + case _ => throw new IllegalStateException(s"Required Recoverable Channel") + } + } catch { + case NonFatal(e) => + channelListener.onCreateFailure(e).unsafeStartAndForget() + throw e } - } catch { - case NonFatal(e) => - channelListener.onCreateFailure(e) - throw e } - } + .flatTap(channel => logger.plainDebug(s"Created channel: $channel ${channel.hashCode()}")) } - override def newStreamingConsumer[A: DeliveryConverter](consumerConfig: StreamingConsumerConfig, - monitor: Monitor): Resource[F, RabbitMQStreamingConsumer[F, A]] = { - createChannel().flatMap { channel => - DefaultRabbitMQClientFactory.StreamingConsumer - .create[F, A](consumerConfig, channel, createChannelF, info, republishStrategy, blocker, monitor, consumerListener) - .map(identity[RabbitMQStreamingConsumer[F, A]]) // type inference... :-( - } + override def newStreamingConsumer[A: DeliveryConverter]( + consumerConfig: StreamingConsumerConfig, + monitor: Monitor[F], + ): Resource[F, RabbitMQStreamingConsumer[F, A]] = { + factory.StreamingConsumer + .create[A](consumerConfig, monitor, consumerListener) + .map(identity[RabbitMQStreamingConsumer[F, A]]) // type inference... 
:-( } - def newConsumer[A: DeliveryConverter](consumerConfig: ConsumerConfig, monitor: Monitor)( + def newConsumer[A: DeliveryConverter](consumerConfig: ConsumerConfig, monitor: Monitor[F])( readAction: DeliveryReadAction[F, A]): Resource[F, RabbitMQConsumer[F]] = { - createChannel().map { channel => - DefaultRabbitMQClientFactory.Consumer - .create[F, A](consumerConfig, channel, info, republishStrategy, blocker, monitor, consumerListener, readAction) - } + factory.Consumer.create[A](consumerConfig, consumerListener, readAction, monitor) } def newPullConsumer[A: DeliveryConverter](pullConsumerConfig: PullConsumerConfig, - monitor: Monitor): Resource[F, RabbitMQPullConsumer[F, A]] = { - createChannel().map { channel => - DefaultRabbitMQClientFactory.PullConsumer - .create[F, A](pullConsumerConfig, channel, info, republishStrategy, blocker, monitor) - } + monitor: Monitor[F]): Resource[F, RabbitMQPullConsumer[F, A]] = { + factory.PullConsumer.create[A](pullConsumerConfig, monitor) } private def createChannel(): Resource[F, ServerChannel] = - Resource.make(createChannelF)(channel => - F.delay { - logger.debug(s"Closing channel: $channel ${channel.hashCode()}") - channel.close() - }) - - override def newProducer[A: ProductConverter](producerConfig: ProducerConfig, monitor: Monitor): Resource[F, RabbitMQProducer[F, A]] = { - createChannel().map { channel => - DefaultRabbitMQClientFactory.Producer - .create[F, A](producerConfig, channel, info, blocker, monitor) - } + Resource.make(createChannelF)( + channel => + logger.plainDebug(s"Closing channel: $channel ${channel.hashCode()}") >> + F.delay { + channel.close() + }) + + override def newProducer[A: ProductConverter](producerConfig: ProducerConfig, + monitor: Monitor[F]): Resource[F, RabbitMQProducer[F, A]] = { + factory.Producer + .create[A](producerConfig, monitor) } override def declareExchange(config: DeclareExchangeConfig): F[Unit] = withChannel { ch => - DefaultRabbitMQClientFactory.Declarations.declareExchange(config, ch, info) + factory.Declarations.declareExchange(config, ch) } override def declareQueue(config: DeclareQueueConfig): F[Unit] = withChannel { ch => - DefaultRabbitMQClientFactory.Declarations.declareQueue(config, ch, info) + factory.Declarations.declareQueue(config, ch) } override def bindExchange(config: BindExchangeConfig): F[Unit] = withChannel { ch => - DefaultRabbitMQClientFactory.Declarations.bindExchange(config, ch, info) + factory.Declarations.bindExchange(config, ch) } override def bindQueue(config: BindQueueConfig): F[Unit] = withChannel { ch => - DefaultRabbitMQClientFactory.Declarations.bindQueue(config, ch, info) + factory.Declarations.bindQueue(config, ch) } def withChannel[A](f: ServerChannel => F[A]): F[A] = { createChannel().use(f) } -} - -object DefaultRabbitMQConnection { - def make[F[_]](connection: ServerConnection, - info: RabbitMQConnectionInfo, - republishStrategy: RepublishStrategyConfig, - connectionListener: ConnectionListener, - channelListener: ChannelListener, - consumerListener: ConsumerListener, - blocker: Blocker)(implicit F: ConcurrentEffect[F], timer: Timer[F], cs: ContextShift[F]): F[DefaultRabbitMQConnection[F]] = - F.delay { - new DefaultRabbitMQConnection(connection, info, republishStrategy, connectionListener, channelListener, consumerListener, blocker) - } - .flatTap { conn => - conn.withChannel { channel => - setUpRepublishing(republishStrategy, info, channel) - } - } // prepare exchange for republishing - private def setUpRepublishing[F[_]: Sync](republishStrategyConfig: 
RepublishStrategyConfig, - connectionInfo: RabbitMQConnectionInfo, - channel: ServerChannel): F[Unit] = Sync[F].delay { - republishStrategyConfig match { - case RepublishStrategyConfig.CustomExchange(exchangeName, exchangeDeclare, _) => - if (exchangeDeclare) { - DefaultRabbitMQClientFactory.declareExchange( + private[rabbitmq] val setUpRepublishing: F[Unit] = { + withChannel { channel => + republishStrategy match { + case RepublishStrategyConfig.CustomExchange(exchangeName, exchangeDeclare, _) if exchangeDeclare => + factory.declareExchange( name = exchangeName, `type` = ExchangeType.Direct, durable = true, autoDelete = false, arguments = DeclareArgumentsConfig(), channel = channel, - connectionInfo = connectionInfo - ) - } - - () + )(logger) - case _ => () // no-op + case _ => F.unit // no-op + } } } } + +object DefaultRabbitMQConnection { + def make[F[_]](connection: ServerConnection, + info: RabbitMQConnectionInfo, + republishStrategy: RepublishStrategyConfig, + connectionListener: ConnectionListener[F], + channelListener: ChannelListener[F], + consumerListener: ConsumerListener[F], + blocker: Blocker)(implicit F: ConcurrentEffect[F], timer: Timer[F], cs: ContextShift[F]): F[DefaultRabbitMQConnection[F]] = + F.delay { + new DefaultRabbitMQConnection(connection, info, republishStrategy, connectionListener, channelListener, consumerListener, blocker) + } + .flatTap { _.setUpRepublishing } + +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumer.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumer.scala index 425d04e5..c3a03976 100755 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumer.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumer.scala @@ -2,61 +2,38 @@ package com.avast.clients.rabbitmq import cats.effect._ import cats.implicits._ -import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor -import com.rabbitmq.client.AMQP.BasicProperties -import com.rabbitmq.client.{Delivery => _, _} -import com.typesafe.scalalogging.StrictLogging - -import scala.language.higherKinds - -class DefaultRabbitMQConsumer[F[_]: Effect]( - override val name: String, - override protected val channel: ServerChannel, - override protected val queueName: String, - override protected val connectionInfo: RabbitMQConnectionInfo, - override protected val monitor: Monitor, +import com.rabbitmq.client.{Delivery => _} +import org.slf4j.event.Level + +import scala.concurrent.duration.FiniteDuration + +class DefaultRabbitMQConsumer[F[_]: ConcurrentEffect: Timer, A: DeliveryConverter]( + private[rabbitmq] val base: ConsumerBase[F, A], + channelOps: ConsumerChannelOps[F, A], + processTimeout: FiniteDuration, + timeoutAction: DeliveryResult, + timeoutLogLevel: Level, failureAction: DeliveryResult, - consumerListener: ConsumerListener, - override protected val republishStrategy: RepublishStrategy, - override protected val blocker: Blocker)(readAction: DeliveryReadAction[F, Bytes])(implicit override protected val cs: ContextShift[F]) - extends ConsumerWithCallbackBase(channel, failureAction, consumerListener) - with RabbitMQConsumer[F] - with ConsumerBase[F] - with StrictLogging { - - override def handleDelivery(consumerTag: String, envelope: Envelope, properties: BasicProperties, body: Array[Byte]): Unit = { - processingCount.incrementAndGet() - - val deliveryTag = envelope.getDeliveryTag - val messageId = properties.getMessageId - val routingKey = 
properties.getOriginalRoutingKey.getOrElse(envelope.getRoutingKey) - - val action = handleDelivery(messageId, deliveryTag, properties, routingKey, body)(readAction) - .flatTap(_ => - F.delay { - processingCount.decrementAndGet() - logger.debug(s"Delivery processed successfully (tag $deliveryTag)") - }) - .recoverWith { - case e => - F.delay { - processingCount.decrementAndGet() - processingFailedMeter.mark() - logger.debug("Could not process delivery", e) - } >> - F.raiseError(e) - } - - Effect[F].toIO(action).unsafeToFuture() // actually start the processing - - () + consumerListener: ConsumerListener[F])(userAction: DeliveryReadAction[F, A]) + extends ConsumerWithCallbackBase(base, channelOps, failureAction, consumerListener) + with RabbitMQConsumer[F] { + import base._ + + override protected def handleNewDelivery(d: DeliveryWithContext[A]): F[Option[ConfirmedDeliveryResult[F]]] = { + import d._ + + // we try to catch also long-lasting synchronous work on the thread + val resultAction = blocker.delay { userAction(delivery).map(ConfirmedDeliveryResult(_)) }.flatten + + watchForTimeoutIfConfigured(processTimeout, timeoutAction, timeoutLogLevel)(delivery, resultAction)(F.unit).map(Some(_)) } + } object DefaultRabbitMQConsumer { final val RepublishOriginalRoutingKeyHeaderName = "X-Original-Routing-Key" final val RepublishOriginalUserId = "X-Original-User-Id" final val FederationOriginalRoutingKeyHeaderName = "x-original-routing-key" + final val CorrelationIdHeaderName = CorrelationIdStrategy.CorrelationIdKeyName } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducer.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducer.scala index 2c1f1b0e..8126ccfd 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducer.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducer.scala @@ -1,28 +1,28 @@ package com.avast.clients.rabbitmq -import java.util.UUID - import cats.effect.{Blocker, ContextShift, Effect, Sync} +import cats.implicits.{catsSyntaxApplicativeError, catsSyntaxFlatMapOps, toFlatMapOps} import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.{ChannelNotRecoveredException, MessageProperties, RabbitMQProducer} -import JavaConverters._ -import com.avast.metrics.scalaapi.Monitor +import com.avast.clients.rabbitmq.JavaConverters._ +import com.avast.clients.rabbitmq.api.CorrelationIdStrategy.FromPropertiesOrRandomNew +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor import com.rabbitmq.client.AMQP.BasicProperties import com.rabbitmq.client.{AlreadyClosedException, ReturnListener} -import com.typesafe.scalalogging.StrictLogging -import scala.language.higherKinds +import java.util.UUID import scala.util.control.NonFatal -class DefaultRabbitMQProducer[F[_]: Sync, A: ProductConverter](name: String, - exchangeName: String, - channel: ServerChannel, - defaultProperties: MessageProperties, - reportUnroutable: Boolean, - blocker: Blocker, - monitor: Monitor)(implicit F: Effect[F], cs: ContextShift[F]) - extends RabbitMQProducer[F, A] - with StrictLogging { +class DefaultRabbitMQProducer[F[_], A: ProductConverter](name: String, + exchangeName: String, + channel: ServerChannel, + defaultProperties: MessageProperties, + reportUnroutable: Boolean, + blocker: Blocker, + logger: ImplicitContextLogger[F], + monitor: Monitor[F])(implicit F: Effect[F], cs: ContextShift[F]) + extends 
RabbitMQProducer[F, A] { private val sentMeter = monitor.meter("sent") private val sentFailedMeter = monitor.meter("sentFailed") @@ -34,10 +34,17 @@ class DefaultRabbitMQProducer[F[_]: Sync, A: ProductConverter](name: String, channel.addReturnListener(if (reportUnroutable) LoggingReturnListener else NoOpReturnListener) - override def send(routingKey: String, body: A, properties: Option[MessageProperties] = None): F[Unit] = { - val finalProperties = { - val initialProperties = properties.getOrElse(defaultProperties.copy(messageId = Some(UUID.randomUUID().toString))) - converter.fillProperties(initialProperties) + override def send(routingKey: String, body: A, properties: Option[MessageProperties] = None)( + implicit cidStrategy: CorrelationIdStrategy = FromPropertiesOrRandomNew(properties)): F[Unit] = { + implicit val correlationId: CorrelationId = CorrelationId(cidStrategy.toCIDValue) + + val finalProperties = converter.fillProperties { + val initialProperties = properties.getOrElse(defaultProperties) + + initialProperties.copy( + messageId = initialProperties.messageId.orElse(Some(UUID.randomUUID().toString)), + correlationId = Some(correlationId.value) // it was taken from it if it was there + ) } converter.convert(body) match { @@ -46,30 +53,27 @@ class DefaultRabbitMQProducer[F[_]: Sync, A: ProductConverter](name: String, } } - private def send(routingKey: String, body: Bytes, properties: MessageProperties): F[Unit] = { - blocker.delay { - logger.debug(s"Sending message with ${body.size()} B to exchange $exchangeName with routing key '$routingKey' and $properties") - - try { - sendLock.synchronized { - // see https://www.rabbitmq.com/api-guide.html#channel-threads - channel.basicPublish(exchangeName, routingKey, properties.asAMQP, body.toByteArray) + private def send(routingKey: String, body: Bytes, properties: MessageProperties)(implicit correlationId: CorrelationId): F[Unit] = { + logger.debug(s"Sending message with ${body.size()} B to exchange $exchangeName with routing key '$routingKey' and $properties") >> + blocker + .delay { + sendLock.synchronized { + // see https://www.rabbitmq.com/api-guide.html#channel-threads + channel.basicPublish(exchangeName, routingKey, properties.asAMQP, body.toByteArray) + } + } + .flatTap(_ => sentMeter.mark) + .recoverWith { + case ce: AlreadyClosedException => + logger.debug(ce)(s"[$name] Failed to send message with routing key '$routingKey' to exchange '$exchangeName'") >> + sentFailedMeter.mark >> + F.raiseError[Unit](ChannelNotRecoveredException("Channel closed, wait for recovery", ce)) + + case NonFatal(e) => + logger.debug(e)(s"[$name] Failed to send message with routing key '$routingKey' to exchange '$exchangeName'") >> + sentFailedMeter.mark >> + F.raiseError[Unit](e) } - - // ok! 
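
[Editor's aside - illustrative sketch, not part of the patch] The reworked `send` above receives its CorrelationIdStrategy as an implicit parameter defaulting to FromPropertiesOrRandomNew(properties). A hedged usage sketch - the helper name and message type are hypothetical; the strategy names come from this patch's API:

    import cats.effect.IO
    import com.avast.clients.rabbitmq.api.{CorrelationIdStrategy, RabbitMQProducer}

    // Pin the correlation ID for a single send (e.g. to propagate an upstream ID);
    // dropping the second argument list falls back to FromPropertiesOrRandomNew.
    def sendWithFixedCid(producer: RabbitMQProducer[IO, String], routingKey: String, msg: String): IO[Unit] =
      producer.send(routingKey, msg)(CorrelationIdStrategy.Fixed("my-upstream-cid"))
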
- sentMeter.mark() - } catch { - case ce: AlreadyClosedException => - logger.debug(s"[$name] Failed to send message with routing key '$routingKey' to exchange '$exchangeName'", ce) - sentFailedMeter.mark() - throw ChannelNotRecoveredException("Channel closed, wait for recovery", ce) - - case NonFatal(e) => - logger.debug(s"[$name] Failed to send message with routing key '$routingKey' to exchange '$exchangeName'", e) - sentFailedMeter.mark() - throw e - } - } } // scalastyle:off @@ -80,10 +84,12 @@ class DefaultRabbitMQProducer[F[_]: Sync, A: ProductConverter](name: String, routingKey: String, properties: BasicProperties, body: Array[Byte]): Unit = { - unroutableMeter.mark() - logger.warn( - s"[$name] Message sent with routingKey '$routingKey' to exchange '$exchange' (message ID '${properties.getMessageId}', body size ${body.length} B) is unroutable ($replyCode: $replyText)" - ) + startAndForget { + unroutableMeter.mark >> + logger.plainWarn( + s"[$name] Message sent with routingKey '$routingKey' to exchange '$exchange' (message ID '${properties.getMessageId}', body size ${body.length} B) is unroutable ($replyCode: $replyText)" + ) + } } } @@ -94,7 +100,9 @@ class DefaultRabbitMQProducer[F[_]: Sync, A: ProductConverter](name: String, routingKey: String, properties: BasicProperties, body: Array[Byte]): Unit = { - unroutableMeter.mark() + startAndForget { + unroutableMeter.mark + } } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumer.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumer.scala index 3a61707d..a93c2cc6 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumer.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumer.scala @@ -1,41 +1,20 @@ package com.avast.clients.rabbitmq -import java.util.concurrent.atomic.AtomicInteger - import cats.effect._ import cats.implicits._ import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.JavaConverters._ import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor -import com.rabbitmq.client.{AMQP, GetResponse} -import com.typesafe.scalalogging.StrictLogging - -import scala.language.higherKinds -import scala.util.control.NonFatal - -class DefaultRabbitMQPullConsumer[F[_]: Effect, A: DeliveryConverter]( - override val name: String, - protected override val channel: ServerChannel, - protected override val queueName: String, - protected override val connectionInfo: RabbitMQConnectionInfo, - failureAction: DeliveryResult, - protected override val monitor: Monitor, - override protected val republishStrategy: RepublishStrategy, - protected override val blocker: Blocker)(implicit override protected val cs: ContextShift[F]) - extends RabbitMQPullConsumer[F, A] - with ConsumerBase[F] - with StrictLogging { - - override protected implicit val F: Sync[F] = Sync[F] +import com.avast.metrics.scalaeffectapi.SettableGauge - private val tasksMonitor = monitor.named("tasks") - - private val processingCount = new AtomicInteger(0) +import java.util.concurrent.atomic.AtomicInteger - tasksMonitor.gauge("processing")(() => processingCount.get()) +class DefaultRabbitMQPullConsumer[F[_]: ConcurrentEffect, A: DeliveryConverter](base: ConsumerBase[F, A], + channelOps: ConsumerChannelOps[F, A]) + extends RabbitMQPullConsumer[F, A] { + import base._ + import channelOps._ - private def convertMessage(b: Bytes): Either[ConversionException, A] = implicitly[DeliveryConverter[A]].convert(b) + protected val 
processingCount: SettableGauge[F, Long] = consumerRootMonitor.gauge.settableLong("processing", replaceExisting = true) override def pull(): F[PullResult[F, A]] = { blocker @@ -44,24 +23,20 @@ class DefaultRabbitMQPullConsumer[F[_]: Effect, A: DeliveryConverter]( } .flatMap { case Some(response) => - processingCount.incrementAndGet() + val rawBody = Bytes.copyFrom(response.getBody) - val envelope = response.getEnvelope - val properties = response.getProps + (processingCount.inc >> parseDelivery(response.getEnvelope, rawBody, response.getProps)).flatMap { d => + import d._ + import context._ - val deliveryTag = envelope.getDeliveryTag - val messageId = properties.getMessageId - val routingKey = envelope.getRoutingKey + val dwh = createDeliveryWithHandle(delivery) { result => + handleResult(rawBody, delivery)(result) >> + processingCount.dec.void + } - logger.debug(s"[$name] Read delivery with ID $messageId, deliveryTag $deliveryTag") - - handleMessage(response, properties, routingKey) { result => - super - .handleResult(messageId, deliveryTag, properties, routingKey, response.getBody)(result) - .map { _ => - processingCount.decrementAndGet() - () - } + consumerLogger.debug(s"[$consumerName] Read delivery with $messageId $deliveryTag").as { + PullResult.Ok(dwh) + } } case None => @@ -71,40 +46,7 @@ class DefaultRabbitMQPullConsumer[F[_]: Effect, A: DeliveryConverter]( } } - private def handleMessage(response: GetResponse, properties: AMQP.BasicProperties, routingKey: String)( - handleResult: DeliveryResult => F[Unit]): F[PullResult[F, A]] = { - try { - val bytes = Bytes.copyFrom(response.getBody) - - val delivery = convertMessage(bytes) match { - case Right(a) => - val delivery = Delivery(a, properties.asScala, routingKey) - logger.trace(s"[$name] Received delivery: ${delivery.copy(body = bytes)}") - delivery - - case Left(ce) => - val delivery = Delivery.MalformedContent(bytes, properties.asScala, routingKey, ce) - logger.trace(s"[$name] Received delivery but could not convert it: $delivery") - delivery - } - - val dwh = createDeliveryWithHandle(delivery, handleResult) - - Effect[F].pure { - PullResult.Ok(dwh) - } - } catch { - case NonFatal(e) => - logger.error( - s"[$name] Error while converting the message, it's probably a BUG; the converter should return Left(ConversionException)", - e - ) - - handleResult(failureAction).flatMap(_ => Effect[F].raiseError(e)) - } - } - - private def createDeliveryWithHandle[B](d: Delivery[B], handleResult: DeliveryResult => F[Unit]): DeliveryWithHandle[F, B] = { + private def createDeliveryWithHandle[B](d: Delivery[B])(handleResult: DeliveryResult => F[Unit]): DeliveryWithHandle[F, B] = { new DeliveryWithHandle[F, B] { override val delivery: Delivery[B] = d diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQStreamingConsumer.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQStreamingConsumer.scala index 420b6c3c..b65de6c5 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQStreamingConsumer.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQStreamingConsumer.scala @@ -1,72 +1,100 @@ package com.avast.clients.rabbitmq import cats.effect.concurrent._ -import cats.effect.{Blocker, CancelToken, Concurrent, ConcurrentEffect, ContextShift, Effect, ExitCase, IO, Resource, Sync, Timer} +import cats.effect.implicits.toConcurrentOps +import cats.effect.{ConcurrentEffect, ContextShift, ExitCase, Resource, Timer} import cats.syntax.all._ -import com.avast.bytes.Bytes -import 
com.avast.clients.rabbitmq.DefaultRabbitMQStreamingConsumer.DeliveryQueue +import com.avast.clients.rabbitmq.DefaultRabbitMQStreamingConsumer.{DeferredResult, DeliveryQueue} +import com.avast.clients.rabbitmq.api.DeliveryResult.Reject import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor import com.rabbitmq.client.{Delivery => _, _} -import com.typesafe.scalalogging.StrictLogging import fs2.Stream -import fs2.concurrent.Queue +import fs2.concurrent.{Queue, SignallingRef} +import org.slf4j.event.Level -import java.util.concurrent.TimeoutException +import java.time.Instant import scala.concurrent.duration.FiniteDuration -import scala.language.higherKinds -import scala.util.control.NonFatal -class DefaultRabbitMQStreamingConsumer[F[_]: ConcurrentEffect: Timer, A: DeliveryConverter] private ( - name: String, - queueName: String, +class DefaultRabbitMQStreamingConsumer[F[_]: ConcurrentEffect: Timer, A] private ( + base: ConsumerBase[F, A], + channelOpsFactory: ConsumerChannelOpsFactory[F, A], initialConsumerTag: String, - connectionInfo: RabbitMQConnectionInfo, - consumerListener: ConsumerListener, - monitor: Monitor, - republishStrategy: RepublishStrategy, - timeout: FiniteDuration, - timeoutAction: (Delivery[Bytes], TimeoutException) => F[DeliveryResult], - recoveringMutex: Semaphore[F], - blocker: Blocker)(createQueue: F[DeliveryQueue[F, Bytes]], newChannel: F[ServerChannel])(implicit cs: ContextShift[F]) - extends RabbitMQStreamingConsumer[F, A] - with StrictLogging { + consumerListener: ConsumerListener[F], + processTimeout: FiniteDuration, + timeoutAction: DeliveryResult, + timeoutLogLevel: Level, + recoveringMutex: Semaphore[F])(createQueue: F[DeliveryQueue[F, A]]) + extends RabbitMQStreamingConsumer[F, A] { + import base._ - private lazy val F: Sync[F] = Sync[F] - - private lazy val streamFailureMeter = monitor.meter("streamFailures") + private lazy val streamFailureMeter = consumerRootMonitor.meter("streamFailures") private lazy val consumer = Ref.unsafe[F, Option[StreamingConsumer]](None) private lazy val isClosed = Ref.unsafe[F, Boolean](false) private lazy val isOk = Ref.unsafe[F, Boolean](false) - private lazy val tasks = Ref.unsafe[IO, Set[CancelToken[IO]]](Set.empty) lazy val deliveryStream: fs2.Stream[F, StreamedDelivery[F, A]] = { Stream - .eval { checkNotClosed >> recoverIfNeeded } + .resource(Resource.eval(checkNotClosed) >> recoverIfNeeded) .flatMap { queue => - queue.dequeue - .map { - case (del, deff) => - new StreamedDelivery[F, A] { - override val delivery: Delivery[A] = DefaultRabbitMQClientFactory.convertDelivery(del) - override def handle(result: DeliveryResult): F[StreamedResult] = deff.complete(result).map(_ => StreamedResult) - } - } + queue.dequeue.evalMap { + case (del, ref) => + ref.get.map(_.map { _ => + del + }) + }.unNone } .onFinalizeCase(handleStreamFinalize) } - private lazy val recoverIfNeeded: F[DeliveryQueue[F, Bytes]] = { - recoveringMutex.withPermit { - isOk.get.flatMap { - case true => getCurrentQueue - case false => recover + private def createStreamedDelivery(delivery: Delivery[A], + deffRef: SignallingRef[F, Option[DeferredResult[F]]]): StreamedDelivery[F, A] = { + (f: DeliveryReadAction[F, A]) => + deffRef.get.flatMap { + case Some(deff) => + f(delivery).start.flatMap { fiber => + val waitForCancel: F[Unit] = { + deffRef.discrete + .collect { + // We don't care about Some. None means cancel; Some appears only "sometimes" as the initial value update. 
+ case None => fiber.cancel + } + .take(1) // wait for a single (first) update + .compile + .last + .map(_.getOrElse(throw new IllegalStateException("This must not happen!"))) + } + + val waitForFinish = { + fiber.join + .map(ConfirmedDeliveryResult(_)) + .attempt + .flatMap { dr => + // wait for completion AND confirmation of the result (if there was any) + deff.complete(dr) >> dr.map(_.awaitConfirmation).getOrElse(F.unit) + } + } + + (waitForCancel race waitForFinish).as(()) + } + + case None => F.unit // we're not starting the task } - } } - private lazy val getCurrentQueue: F[DeliveryQueue[F, Bytes]] = { + private lazy val recoverIfNeeded: Resource[F, DeliveryQueue[F, A]] = { + Resource(recoveringMutex.withPermit { + Resource + .eval(isOk.get) + .flatMap { + case true => Resource.eval(getCurrentQueue) + case false => recover + } + .allocated // this is plumbing... we need to _stick_ the resource through plain F here, because of the mutex + }) + } + + private lazy val getCurrentQueue: F[DeliveryQueue[F, A]] = { consumer.get.map { cons => cons .getOrElse(throw new IllegalStateException("Consumer has to be initialized at this stage! It's probably a BUG")) @@ -74,43 +102,33 @@ class DefaultRabbitMQStreamingConsumer[F[_]: ConcurrentEffect: Timer, A: Deliver } } - private lazy val recover: F[DeliveryQueue[F, Bytes]] = { - createQueue.flatTap { newQueue => - newChannel.flatMap { newChannel => - val newConsumer = new StreamingConsumer(newChannel, newQueue) - - consumer - .getAndSet(Some(newConsumer)) - .flatMap { - case Some(oldConsumer) => - blocker - .delay { - val consumerTag = oldConsumer.getConsumerTag - logger.debug(s"[$name] Cancelling consumer with consumer tag $consumerTag") - - try { - oldConsumer.channel.close() - } catch { - case NonFatal(e) => logger.debug(s"Could not close channel", e) + private lazy val recover: Resource[F, DeliveryQueue[F, A]] = { + Resource.eval(createQueue).flatTap { newQueue => + channelOpsFactory.create.flatMap { channelOps => + Resource + .make { + SignallingRef[F, Boolean](true) + .map(new StreamingConsumer(channelOps, newQueue, _)) + .flatMap { newConsumer => + consumer + .getAndSet(Some(newConsumer)) + .flatMap { oc => + val consumerTag = oc match { + case Some(oldConsumer) => oldConsumer.getConsumerTag + case None => initialConsumerTag + } + + consumerLogger.plainDebug(s"[$consumerName] Starting consuming") >> + DefaultRabbitMQClientFactory.startConsumingQueue(channelOps.channel, queueName, consumerTag, newConsumer, blocker) >> + isOk.set(true) } - - consumerTag - } - .flatTap(_ => tryCancelRunningTasks) - - case None => - F.delay { - logger.debug("No old consumer to be cancelled!") - initialConsumerTag + .as(newConsumer) } + } { oldConsumer => + val consumerTag = oldConsumer.getConsumerTag + consumerLogger.plainDebug(s"[$consumerName] Cancelling consumer with consumer tag $consumerTag") >> + oldConsumer.stopConsuming() } - .flatMap { consumerTag => - blocker.delay { - logger.debug("Starting consuming") - DefaultRabbitMQClientFactory.startConsumingQueue(newChannel, queueName, consumerTag, newConsumer) - () - } - } >> isOk.set(true) } } } @@ -118,29 +136,16 @@ class DefaultRabbitMQStreamingConsumer[F[_]: ConcurrentEffect: Timer, A: Deliver private lazy val close: F[Unit] = recoveringMutex.withPermit { isOk.get.flatMap { isOk => if (isOk) consumer.get.flatMap { - case Some(streamingConsumer) => - blocker.delay { - streamingConsumer.stopConsuming() - streamingConsumer.channel.close() - } - + case Some(streamingConsumer) => 
streamingConsumer.stopConsuming()
         case None => F.unit
       } else F.unit
     } >> isOk.set(false) >> isClosed.set(true)
   }
 
-  private lazy val tryCancelRunningTasks: F[Unit] = {
-    tasks
-      .update { tasks =>
-        tasks.foreach(_.unsafeRunSync())
-        Set.empty
-      }
-      .to[F]
-  }
-
   private lazy val stopConsuming: F[Unit] = {
     recoveringMutex.withPermit {
-      isOk.set(false) >> consumer.get.flatMap(_.fold(F.unit)(_.stopConsuming())) // stop consumer, if there is some
+      isOk.set(false)
+      // the consumer is stopped by the Resource
     }
   }
 
@@ -151,143 +156,148 @@
   private def handleStreamFinalize(e: ExitCase[Throwable]): F[Unit] = e match {
     case ExitCase.Completed =>
       stopConsuming
-        .flatTap(_ => F.delay(logger.debug(s"[$name] Delivery stream was completed")))
+        .flatTap(_ => consumerLogger.plainDebug(s"[$consumerName] Delivery stream was completed"))
 
     case ExitCase.Canceled =>
       stopConsuming
-        .flatTap(_ => F.delay(logger.debug(s"[$name] Delivery stream was cancelled")))
+        .flatTap(_ => consumerLogger.plainDebug(s"[$consumerName] Delivery stream was cancelled"))
 
     case ExitCase.Error(e: ShutdownSignalException) =>
       stopConsuming
         .flatTap { _ =>
-          F.delay {
-            streamFailureMeter.mark()
-            logger.error(s"[$name] Delivery stream was terminated because of channel shutdown. It might be a BUG int the client", e)
-          }
+          streamFailureMeter.mark >>
+            consumerLogger.plainError(e)(
+              s"[$consumerName] Delivery stream was terminated because of channel shutdown. It might be a BUG in the client")
         }
 
     case ExitCase.Error(e) =>
       stopConsuming
-        .flatTap(_ =>
-          F.delay {
-            streamFailureMeter.mark()
-            logger.debug(s"[$name] Delivery stream was terminated", e)
-          })
+        .flatTap(
+          _ =>
+            streamFailureMeter.mark >>
+              consumerLogger.plainDebug(e)(s"[$consumerName] Delivery stream was terminated"))
   }
 
-  private def deliveryCallback(delivery: Delivery[Bytes]): F[DeliveryResult] = {
+  private def enqueueDelivery(delivery: Delivery[A], deferred: DeferredResult[F]): F[SignallingRef[F, Option[DeferredResult[F]]]] = {
     for {
-      deferred <- Deferred[F, DeliveryResult]
-      consumerOpt <- this.consumer.get
+      consumerOpt <- recoveringMutex.withPermit { this.consumer.get }
       consumer = consumerOpt.getOrElse(throw new IllegalStateException("Consumer has to be initialized at this stage! It's probably a BUG"))
-      _ <- consumer.queue.enqueue1((delivery, deferred))
-      res <- Concurrent.timeout(deferred.get, timeout).recoverWith { case e: TimeoutException => timeoutAction(delivery, e) }
+      ref <- SignallingRef(Option(deferred))
+      streamedDelivery = createStreamedDelivery(delivery, ref)
+      _ <- consumer.queue.enqueue1((streamedDelivery, ref))
     } yield {
-      res
+      ref
     }
   }
 
-  private class StreamingConsumer(override val channel: ServerChannel, val queue: DeliveryQueue[F, Bytes])
-      extends ConsumerWithCallbackBase(channel, DeliveryResult.Retry, consumerListener) {
-    private val receivingEnabled = Ref.unsafe[F, Boolean](true)
-    override protected val republishStrategy: RepublishStrategy = DefaultRabbitMQStreamingConsumer.this.republishStrategy
-
-    override def handleDelivery(consumerTag: String, envelope: Envelope, properties: AMQP.BasicProperties, body: Array[Byte]): Unit = {
-      processingCount.incrementAndGet()
-
-      val deliveryTag = envelope.getDeliveryTag
-      val messageId = properties.getMessageId
-      val routingKey = properties.getOriginalRoutingKey.getOrElse(envelope.getRoutingKey)
+  private class StreamingConsumer(channelOps: ConsumerChannelOps[F, A],
+                                  val queue: DeliveryQueue[F, A],
+                                  receivingEnabled: SignallingRef[F, Boolean])
+      extends ConsumerWithCallbackBase(base, channelOps, DeliveryResult.Retry, consumerListener) {
+
+    override protected def handleNewDelivery(d: DeliveryWithContext[A]): F[Option[ConfirmedDeliveryResult[F]]] = {
+      import d._
+      import context._
+
+      val ignoreDelivery: F[Option[ConfirmedDeliveryResult[F]]] = consumerLogger
+        .debug(
+          s"[$consumerName] The consumer was discarded, throwing away delivery $messageId ($deliveryTag) - it will be redelivered later")
+        .as(None)
+
+      receivingEnabled.get
+        .flatMap {
+          case true =>
+            Deferred[F, Either[Throwable, ConfirmedDeliveryResult[F]]]
+              .flatMap { waitForResult(delivery, messageId, deliveryTag) }
+              .flatMap { dr =>
+                receivingEnabled.get.flatMap {
+                  case false => ignoreDelivery
+                  case true => F.pure(Some(dr))
+                }
+              }
 
-      val task: F[Unit] = receivingEnabled.get.flatMap {
-        case false =>
-          F.delay {
-            logger.trace(s"Delivery $messageId (tag $deliveryTag) was ignored because consumer is not OK - it will be redelivered later")
-            processingCount.decrementAndGet()
-            ()
-          }
+          case false => ignoreDelivery
+        }
+    }
 
+    def stopConsuming(): F[Unit] = {
+      receivingEnabled.getAndSet(false).flatMap {
         case true =>
-          val action = handleDelivery(messageId, deliveryTag, properties, routingKey, body)(deliveryCallback)
-            .flatTap(_ => F.delay(logger.debug(s"Delivery $messageId processed successfully (tag $deliveryTag)")))
-            .recoverWith {
-              case e =>
-                F.delay {
-                  processingFailedMeter.mark()
-                  logger.debug("Could not process delivery", e)
-                } >> F.raiseError(e)
+          consumerLogger.plainDebug(s"[$consumerName] Stopping consumption for $getConsumerTag") >>
+            blocker.delay {
+              channelOps.channel.basicCancel(getConsumerTag)
+              channelOps.channel.setDefaultConsumer(null)
             }
 
-          recoveringMutex.withPermit(F.suspend {
-            lazy val ct: CancelToken[IO] = Effect[F]
-              .toIO(action)
-              .runCancelable(_ => {
-                processingCount.decrementAndGet()
-                tasks.update(_ - ct)
-              })
-              .unsafeRunSync()
-
-            tasks.update(_ + ct).to[F]
-          })
+        case false => consumerLogger.plainDebug(s"Can't stop consumption for $getConsumerTag because it's already stopped")
       }
-
-      Effect[F].toIO(task).unsafeToFuture()
-
-      ()
     }
 
-    def stopConsuming(): F[Unit] = {
-      receivingEnabled.set(false) >> blocker.delay {
-        logger.debug(s"Stopping consummation for $getConsumerTag")
-        
channel.basicCancel(getConsumerTag) - channel.setDefaultConsumer(null) - } - } + private def waitForResult(delivery: Delivery[A], messageId: MessageId, deliveryTag: DeliveryTag)(deferred: DeferredResult[F])( + implicit dctx: DeliveryContext): F[ConfirmedDeliveryResult[F]] = { + enqueueDelivery(delivery, deferred) + .flatMap { ref => + val enqueueTime = Instant.now() + + val discardedNotification = receivingEnabled.discrete + .filter(en => !en) // filter only FALSE + .take(1) // wait for a single (first) update + .compile + .last + .map(_.getOrElse(throw new IllegalStateException("This must not happen!"))) + + val result = F.race(discardedNotification, deferred.get).flatMap { + case Right(Right(r)) => F.pure(r) + case Left(_) => F.pure(ConfirmedDeliveryResult[F](Reject)) // it will be ignored later anyway... + + case Right(Left(err)) => + consumerLogger.debug(err)(s"[$consumerName] Failure when processing delivery $messageId ($deliveryTag)") >> + F.raiseError[ConfirmedDeliveryResult[F]](err) + } - override protected implicit val cs: ContextShift[F] = DefaultRabbitMQStreamingConsumer.this.cs - override protected def name: String = DefaultRabbitMQStreamingConsumer.this.name - override protected def queueName: String = DefaultRabbitMQStreamingConsumer.this.queueName - override protected def blocker: Blocker = DefaultRabbitMQStreamingConsumer.this.blocker - override protected def connectionInfo: RabbitMQConnectionInfo = DefaultRabbitMQStreamingConsumer.this.connectionInfo - override protected def monitor: Monitor = DefaultRabbitMQStreamingConsumer.this.monitor + watchForTimeoutIfConfigured(processTimeout, timeoutAction, timeoutLogLevel)(delivery, result) { + F.defer { + val l = java.time.Duration.between(enqueueTime, Instant.now()) + consumerLogger.debug(s"[$consumerName] Timeout after $l, cancelling processing of $messageId ($deliveryTag)") + } >> ref.set(None) // cancel by this! 
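+          // [Editor's note, not in the original patch] ref.set(None) is what actually cancels
+          // the in-flight user action: createStreamedDelivery races
+          //   (waitForCancel race waitForFinish).as(())
+          // and waitForCancel completes on the first None observed in the SignallingRef, so a
+          // timed-out delivery is resolved with `timeoutAction` while the user's fiber is cancelled.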
+ } + } + } } } -object DefaultRabbitMQStreamingConsumer extends StrictLogging { +object DefaultRabbitMQStreamingConsumer { - private type DeliveryQueue[F[_], A] = Queue[F, (Delivery[A], Deferred[F, DeliveryResult])] + private type DeferredResult[F[_]] = Deferred[F, Either[Throwable, ConfirmedDeliveryResult[F]]] + private type QueuedDelivery[F[_], A] = (StreamedDelivery[F, A], SignallingRef[F, Option[DeferredResult[F]]]) + private type DeliveryQueue[F[_], A] = Queue[F, QueuedDelivery[F, A]] def make[F[_]: ConcurrentEffect: Timer, A: DeliveryConverter]( - name: String, - newChannel: F[ServerChannel], + base: ConsumerBase[F, A], + channelOpsFactory: ConsumerChannelOpsFactory[F, A], initialConsumerTag: String, - queueName: String, - connectionInfo: RabbitMQConnectionInfo, - consumerListener: ConsumerListener, + consumerListener: ConsumerListener[F], queueBufferSize: Int, - monitor: Monitor, - republishStrategy: RepublishStrategy, timeout: FiniteDuration, - timeoutAction: (Delivery[Bytes], TimeoutException) => F[DeliveryResult], - blocker: Blocker)(implicit cs: ContextShift[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { - val newQueue: F[DeliveryQueue[F, Bytes]] = createQueue(queueBufferSize) + timeoutAction: DeliveryResult, + timeoutLogLevel: Level)(implicit cs: ContextShift[F]): Resource[F, DefaultRabbitMQStreamingConsumer[F, A]] = { + val newQueue: F[DeliveryQueue[F, A]] = createQueue(queueBufferSize) Resource.make(Semaphore[F](1).map { mutex => - new DefaultRabbitMQStreamingConsumer(name, - queueName, - initialConsumerTag, - connectionInfo, - consumerListener, - monitor, - republishStrategy, - timeout, - timeoutAction, - mutex, - blocker)(newQueue, newChannel) + new DefaultRabbitMQStreamingConsumer( + base, + channelOpsFactory, + initialConsumerTag, + consumerListener, + timeout, + timeoutAction, + timeoutLogLevel, + mutex, + )(newQueue) })(_.close) } - private def createQueue[F[_]: ConcurrentEffect](queueBufferSize: Int): F[DeliveryQueue[F, Bytes]] = { - fs2.concurrent.Queue.bounded[F, (Delivery[Bytes], Deferred[F, DeliveryResult])](queueBufferSize) + private def createQueue[F[_]: ConcurrentEffect, A](queueBufferSize: Int): F[DeliveryQueue[F, A]] = { + fs2.concurrent.Queue.bounded[F, QueuedDelivery[F, A]](queueBufferSize) } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DeliveryContext.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DeliveryContext.scala new file mode 100644 index 00000000..d5505f3e --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DeliveryContext.scala @@ -0,0 +1,48 @@ +package com.avast.clients.rabbitmq + +import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer.CorrelationIdHeaderName +import com.avast.clients.rabbitmq.api.CorrelationIdStrategy.CorrelationIdKeyName +import com.avast.clients.rabbitmq.api.Delivery +import com.avast.clients.rabbitmq.logging.LoggingContext +import com.rabbitmq.client.AMQP.BasicProperties +import com.rabbitmq.client.Envelope + +private[rabbitmq] case class DeliveryWithContext[A](delivery: Delivery[A], implicit val context: DeliveryContext) + +private[rabbitmq] case class DeliveryContext(messageId: MessageId, + correlationId: Option[CorrelationId], + deliveryTag: DeliveryTag, + routingKey: RoutingKey, + fixedProperties: BasicProperties) + extends LoggingContext { + + override lazy val asContextMap: Map[String, String] = { + correlationId.map(_.asContextMap).getOrElse(Map.empty) + } +} + +private[rabbitmq] object DeliveryContext { + def from(envelope: Envelope, properties: 
BasicProperties): DeliveryContext = { + val correlationIdRaw = Option(properties.getCorrelationId).orElse { + Option(properties.getHeaders).flatMap(h => Option(h.get(CorrelationIdHeaderName))).map(_.toString) + } + + val fixedProperties = properties.builder().correlationId(correlationIdRaw.orNull).build() + + val correlationId = correlationIdRaw.map(CorrelationId) + val messageId = MessageId(Option(fixedProperties.getMessageId).getOrElse("-none-")) + + val deliveryTag = DeliveryTag(envelope.getDeliveryTag) + val routingKey = RoutingKey(fixedProperties.getOriginalRoutingKey.getOrElse(envelope.getRoutingKey)) + + DeliveryContext(messageId, correlationId, deliveryTag, routingKey, fixedProperties) + } +} + +private[rabbitmq] final case class CorrelationId(value: String) extends LoggingContext { + def asContextMap: Map[String, String] = Map(CorrelationIdKeyName -> value) +} + +private[rabbitmq] final case class MessageId(value: String) extends AnyVal +private[rabbitmq] final case class RoutingKey(value: String) extends AnyVal +private[rabbitmq] final case class DeliveryTag(value: Long) extends AnyVal diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/MultiFormatConsumer.scala b/core/src/main/scala/com/avast/clients/rabbitmq/MultiFormatConsumer.scala index a543d7ab..540e74dd 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/MultiFormatConsumer.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/MultiFormatConsumer.scala @@ -1,46 +1,47 @@ package com.avast.clients.rabbitmq +import cats.effect.Sync +import cats.implicits.{catsSyntaxApplicativeError, catsSyntaxFlatMapOps, toFunctorOps} import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.{ConversionException, Delivery, DeliveryResult} -import com.typesafe.scalalogging.StrictLogging +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger import scala.collection.immutable -import scala.language.higherKinds -import scala.util.control.NonFatal class MultiFormatConsumer[F[_], A] private (supportedConverters: immutable.Seq[CheckedDeliveryConverter[A]], - action: Delivery[A] => F[DeliveryResult]) - extends (Delivery[Bytes] => F[DeliveryResult]) - with StrictLogging { + action: Delivery[A] => F[DeliveryResult])(implicit F: Sync[F]) + extends (Delivery[Bytes] => F[DeliveryResult]) { + private val logger = ImplicitContextLogger.createLogger[F, MultiFormatConsumer[F, A]] + override def apply(delivery: Delivery[Bytes]): F[DeliveryResult] = { - val converted: Delivery[A] = try { - supportedConverters - .collectFirst { - case c if c.canConvert(delivery) => - delivery.flatMap[A] { d => - c.convert(d.body) match { - case Right(a) => d.copy(body = a) - case Left(ce) => - logger.debug("Error while converting", ce) - d.toMalformed(ce) - } - } - } - .getOrElse { - delivery.toMalformed(ConversionException(s"Could not find suitable converter for $delivery")) - } - } catch { - case NonFatal(e) => - logger.debug("Error while converting", e) - delivery.toMalformed(ConversionException("Error while converting", e)) - } + implicit val correlationId: CorrelationId = CorrelationId(delivery.properties.correlationId.getOrElse("unknown")) - action(converted) + F.delay { + supportedConverters + .collectFirst { + case c if c.canConvert(delivery) => + delivery.flatMap[A] { d => + c.convert(d.body) match { + case Right(a) => d.copy(body = a) + case Left(ce) => d.toMalformed(ce) + } + } + } + .getOrElse { + delivery.toMalformed(ConversionException(s"Could not find suitable converter for 
$delivery")) + } + } + .recoverWith { + case e => + logger.debug(e)("Error while converting").as { + delivery.toMalformed(ConversionException("Error while converting", e)) + } + } >>= action } } object MultiFormatConsumer { - def forType[F[_], A](supportedConverters: CheckedDeliveryConverter[A]*)( + def forType[F[_]: Sync, A](supportedConverters: CheckedDeliveryConverter[A]*)( action: Delivery[A] => F[DeliveryResult]): MultiFormatConsumer[F, A] = { new MultiFormatConsumer[F, A](supportedConverters.toList, action) } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/PoisonedMessageHandler.scala b/core/src/main/scala/com/avast/clients/rabbitmq/PoisonedMessageHandler.scala new file mode 100644 index 00000000..0c586714 --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/PoisonedMessageHandler.scala @@ -0,0 +1,150 @@ +package com.avast.clients.rabbitmq + +import cats.Applicative +import cats.effect.{Resource, Sync} +import cats.implicits.{catsSyntaxApplicativeError, catsSyntaxFlatMapOps, toFunctorOps} +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.PoisonedMessageHandler.defaultHandlePoisonedMessage +import com.avast.clients.rabbitmq.api.DeliveryResult.{Reject, Republish} +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor + +import scala.util.Try +import scala.util.control.NonFatal + +sealed trait PoisonedMessageHandler[F[_], A] { + def interceptResult(delivery: Delivery[A], messageId: MessageId, rawBody: Bytes)(result: DeliveryResult)( + implicit dctx: DeliveryContext): F[DeliveryResult] +} + +class LoggingPoisonedMessageHandler[F[_]: Sync, A](maxAttempts: Int) extends PoisonedMessageHandler[F, A] { + private val logger = ImplicitContextLogger.createLogger[F, LoggingPoisonedMessageHandler[F, A]] + + override def interceptResult(delivery: Delivery[A], messageId: MessageId, rawBody: Bytes)(result: DeliveryResult)( + implicit dctx: DeliveryContext): F[DeliveryResult] = { + PoisonedMessageHandler.handleResult(delivery, + messageId, + maxAttempts, + logger, + (d: Delivery[A], _) => defaultHandlePoisonedMessage[F, A](maxAttempts, logger)(d))(result) + } +} + +class NoOpPoisonedMessageHandler[F[_]: Sync, A] extends PoisonedMessageHandler[F, A] { + override def interceptResult(delivery: Delivery[A], messageId: MessageId, rawBody: Bytes)(result: DeliveryResult)( + implicit dctx: DeliveryContext): F[DeliveryResult] = Sync[F].pure(result) +} + +class DeadQueuePoisonedMessageHandler[F[_]: Sync, A](maxAttempts: Int)(moveToDeadQueue: (Delivery[A], Bytes, DeliveryContext) => F[Unit]) + extends PoisonedMessageHandler[F, A] { + private val logger = ImplicitContextLogger.createLogger[F, DeadQueuePoisonedMessageHandler[F, A]] + + override def interceptResult(delivery: Delivery[A], messageId: MessageId, rawBody: Bytes)(result: DeliveryResult)( + implicit dctx: DeliveryContext): F[DeliveryResult] = { + PoisonedMessageHandler.handleResult(delivery, messageId, maxAttempts, logger, (d, _) => handlePoisonedMessage(d, messageId, rawBody))( + result) + } + + private def handlePoisonedMessage(delivery: Delivery[A], messageId: MessageId, rawBody: Bytes)( + implicit dctx: DeliveryContext): F[Unit] = { + + logger.warn { + s"Message $messageId failures reached the limit $maxAttempts attempts, moving it to the dead queue: $delivery" + } >> + moveToDeadQueue(delivery, rawBody, dctx) >> + logger.debug(s"Message $messageId moved to the dead queue") + } +} + +object 
DeadQueuePoisonedMessageHandler { + def make[F[_]: Sync, A](c: DeadQueuePoisonedMessageHandling, + connection: RabbitMQConnection[F], + monitor: Monitor[F]): Resource[F, DeadQueuePoisonedMessageHandler[F, A]] = { + val dqpc = c.deadQueueProducer + val pc = ProducerConfig(name = dqpc.name, + exchange = dqpc.exchange, + declare = dqpc.declare, + reportUnroutable = dqpc.reportUnroutable, + properties = dqpc.properties) + + connection.newProducer[Bytes](pc, monitor.named("deadQueueProducer")).map { producer => + new DeadQueuePoisonedMessageHandler[F, A](c.maxAttempts)((d: Delivery[A], rawBody: Bytes, dctx: DeliveryContext) => { + val cidStrategy = dctx.correlationId match { + case Some(value) => CorrelationIdStrategy.Fixed(value.value) + case None => CorrelationIdStrategy.RandomNew + } + + producer.send(dqpc.routingKey, rawBody, Some(d.properties))(cidStrategy) + }) + } + } +} + +object PoisonedMessageHandler { + final val RepublishCountHeaderName: String = "X-Republish-Count" + + private[rabbitmq] def make[F[_]: Sync, A](config: Option[PoisonedMessageHandlingConfig], + connection: RabbitMQConnection[F], + monitor: Monitor[F]): Resource[F, PoisonedMessageHandler[F, A]] = { + config match { + case Some(LoggingPoisonedMessageHandling(maxAttempts)) => Resource.pure(new LoggingPoisonedMessageHandler[F, A](maxAttempts)) + case Some(c: DeadQueuePoisonedMessageHandling) => DeadQueuePoisonedMessageHandler.make(c, connection, monitor) + case Some(NoOpPoisonedMessageHandling) | None => + Resource.eval { + val logger = ImplicitContextLogger.createLogger[F, NoOpPoisonedMessageHandler[F, A]] + logger.plainWarn("Using NO-OP poisoned message handler. Potential poisoned messages will cycle forever.").as { + new NoOpPoisonedMessageHandler[F, A] + } + } + } + } + + private[rabbitmq] def defaultHandlePoisonedMessage[F[_]: Sync, A](maxAttempts: Int, logger: ImplicitContextLogger[F])( + delivery: Delivery[A])(implicit dctx: DeliveryContext): F[Unit] = { + logger.warn(s"Message failures reached the limit $maxAttempts attempts, throwing away: $delivery") + } + + private[rabbitmq] def handleResult[F[_]: Sync, A]( + delivery: Delivery[A], + messageId: MessageId, + maxAttempts: Int, + logger: ImplicitContextLogger[F], + handlePoisonedMessage: (Delivery[A], Int) => F[Unit])(r: DeliveryResult)(implicit dctx: DeliveryContext): F[DeliveryResult] = { + r match { + case Republish(isPoisoned, newHeaders) if isPoisoned => + adjustDeliveryResult(delivery, messageId, maxAttempts, newHeaders, logger, handlePoisonedMessage) + case r => Applicative[F].pure(r) // keep other results as they are + } + } + + private def adjustDeliveryResult[F[_]: Sync, A]( + delivery: Delivery[A], + messageId: MessageId, + maxAttempts: Int, + newHeaders: Map[String, AnyRef], + logger: ImplicitContextLogger[F], + handlePoisonedMessage: (Delivery[A], Int) => F[Unit])(implicit dctx: DeliveryContext): F[DeliveryResult] = { + // get current attempt no. 
from passed headers with fallback to original (incoming) headers - the fallback will most likely happen + // but we're giving the programmer chance to programmatically _pretend_ lower attempt number + val attempt = (delivery.properties.headers ++ newHeaders) + .get(RepublishCountHeaderName) + .flatMap(v => Try(v.toString.toInt).toOption) + .getOrElse(0) + 1 + + logger.debug(s"Attempt $attempt/$maxAttempts for $messageId") >> { + if (attempt < maxAttempts) { + Applicative[F].pure( + Republish(countAsPoisoned = true, newHeaders = newHeaders + (RepublishCountHeaderName -> attempt.asInstanceOf[AnyRef]))) + } else { + handlePoisonedMessage(delivery, maxAttempts) + .recoverWith { + case NonFatal(e) => + logger.warn(e)("Custom poisoned message handler failed") + } + .map(_ => Reject) // always REJECT the message + } + } + } + +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala b/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala index d8f58b73..8d6d4537 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala @@ -1,59 +1,57 @@ package com.avast.clients.rabbitmq -import java.io.IOException -import java.util -import java.util.concurrent.ExecutorService - import cats.effect._ +import cats.implicits.{catsSyntaxApplicativeError, catsSyntaxFlatMapOps, toFlatMapOps} import cats.syntax.functor._ import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor import com.rabbitmq.client._ -import com.typesafe.scalalogging.StrictLogging -import javax.net.ssl.SSLContext +import java.io.IOException +import java.util +import java.util.concurrent.ExecutorService +import javax.net.ssl.SSLContext import scala.collection.immutable -import scala.language.higherKinds import scala.util.control.NonFatal trait RabbitMQConnection[F[_]] { - /** Creates new channel inside this connection. Usable for some applications-specific actions which are not supported by the library.
- * The caller is responsible for closing the created channel - it's closed automatically only when the whole connection is closed. + /** Creates new channel inside this connection. Usable for some applications-specific actions which are not supported by the library. */ def newChannel(): Resource[F, ServerChannel] /** Creates new instance of consumer, using the passed configuration. * * @param consumerConfig Configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. * @param readAction Action executed for each delivered message. You should never return a failed F. */ - def newConsumer[A: DeliveryConverter](consumerConfig: ConsumerConfig, monitor: Monitor)( + def newConsumer[A: DeliveryConverter](consumerConfig: ConsumerConfig, monitor: Monitor[F])( readAction: DeliveryReadAction[F, A]): Resource[F, RabbitMQConsumer[F]] /** Creates new instance of producer, using the passed configuration. * * @param producerConfig Configuration of the producer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. */ - def newProducer[A: ProductConverter](producerConfig: ProducerConfig, monitor: Monitor): Resource[F, RabbitMQProducer[F, A]] + def newProducer[A: ProductConverter](producerConfig: ProducerConfig, monitor: Monitor[F]): Resource[F, RabbitMQProducer[F, A]] /** Creates new instance of pull consumer, using the passed configuration. * * @param pullConsumerConfig Configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. */ def newPullConsumer[A: DeliveryConverter](pullConsumerConfig: PullConsumerConfig, - monitor: Monitor): Resource[F, RabbitMQPullConsumer[F, A]] + monitor: Monitor[F]): Resource[F, RabbitMQPullConsumer[F, A]] /** Creates new instance of streaming consumer, using the passed configuration. * * @param consumerConfig Configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. 
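    * A minimal usage sketch (illustrative only; `connection`, `consumerConfig` and `monitor`
    * are assumed to exist, and the stream handling mirrors the tests in this patch):
    * {{{
    *   connection.newStreamingConsumer[Bytes](consumerConfig, monitor).use { consumer =>
    *     consumer.deliveryStream
    *       .evalMap(_.handle(DeliveryResult.Ack))
    *       .compile
    *       .drain
    *   }
    * }}}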
*/ def newStreamingConsumer[A: DeliveryConverter](consumerConfig: StreamingConsumerConfig, - monitor: Monitor): Resource[F, RabbitMQStreamingConsumer[F, A]] + monitor: Monitor[F]): Resource[F, RabbitMQStreamingConsumer[F, A]] def declareExchange(config: DeclareExchangeConfig): F[Unit] def declareQueue(config: DeclareQueueConfig): F[Unit] @@ -67,43 +65,55 @@ trait RabbitMQConnection[F[_]] { */ def withChannel[A](f: ServerChannel => F[A]): F[A] - def connectionListener: ConnectionListener - def channelListener: ChannelListener - def consumerListener: ConsumerListener + def connectionListener: ConnectionListener[F] + def channelListener: ChannelListener[F] + def consumerListener: ConsumerListener[F] } -object RabbitMQConnection extends StrictLogging { +object RabbitMQConnection { object DefaultListeners { - final val DefaultConnectionListener: ConnectionListener = ConnectionListener.Default - final val DefaultChannelListener: ChannelListener = ChannelListener.Default - final val DefaultConsumerListener: ConsumerListener = ConsumerListener.Default + def defaultConnectionListener[F[_]: Sync]: ConnectionListener[F] = ConnectionListener.default[F] + def defaultChannelListener[F[_]: Sync]: ChannelListener[F] = ChannelListener.default[F] + def defaultConsumerListener[F[_]: Sync]: ConsumerListener[F] = ConsumerListener.default[F] } def make[F[_]: ConcurrentEffect: Timer: ContextShift]( connectionConfig: RabbitMQConnectionConfig, blockingExecutor: ExecutorService, sslContext: Option[SSLContext] = None, - connectionListener: ConnectionListener = DefaultListeners.DefaultConnectionListener, - channelListener: ChannelListener = DefaultListeners.DefaultChannelListener, - consumerListener: ConsumerListener = DefaultListeners.DefaultConsumerListener): Resource[F, RabbitMQConnection[F]] = { + connectionListener: Option[ConnectionListener[F]] = None, + channelListener: Option[ChannelListener[F]] = None, + consumerListener: Option[ConsumerListener[F]] = None): Resource[F, RabbitMQConnection[F]] = { + val logger = ImplicitContextLogger.createLogger[F, RabbitMQConnection.type] + val blocker = Blocker.liftExecutorService(blockingExecutor) + val connectionInfo = RabbitMQConnectionInfo( hosts = connectionConfig.hosts.toVector, virtualHost = connectionConfig.virtualHost, username = if (connectionConfig.credentials.enabled) Option(connectionConfig.credentials.username) else None ) - createConnection(connectionConfig, connectionInfo, blockingExecutor, sslContext, connectionListener, channelListener, consumerListener) + val finalConnectionListener = connectionListener.getOrElse(ConnectionListener.default) + val finalChannelListener = channelListener.getOrElse(ChannelListener.default) + val finalConsumerListener = consumerListener.getOrElse(ConsumerListener.default) + + createConnection(connectionConfig, + blockingExecutor, + blocker, + sslContext, + finalConnectionListener, + finalChannelListener, + finalConsumerListener, + logger) .evalMap { connection => - val blocker = Blocker.liftExecutorService(blockingExecutor) - DefaultRabbitMQConnection .make( connection = connection, info = connectionInfo, - connectionListener = connectionListener, - channelListener = channelListener, - consumerListener = consumerListener, + connectionListener = finalConnectionListener, + channelListener = finalChannelListener, + consumerListener = finalConsumerListener, blocker = blocker, republishStrategy = connectionConfig.republishStrategy ) @@ -111,47 +121,53 @@ object RabbitMQConnection extends StrictLogging { } } - protected def 
createConnection[F[_]: Sync](connectionConfig: RabbitMQConnectionConfig, - connectionInfo: RabbitMQConnectionInfo, - executor: ExecutorService, - sslContext: Option[SSLContext], - connectionListener: ConnectionListener, - channelListener: ChannelListener, - consumerListener: ConsumerListener): Resource[F, ServerConnection] = - Resource.make { - Sync[F].delay { - import connectionConfig._ - - val factory = createConnectionFactory(addressResolverType) - val exceptionHandler = createExceptionHandler(connectionListener, channelListener, consumerListener) - - setUpConnection(connectionConfig, factory, exceptionHandler, sslContext, executor) + protected def createConnection[F[_]: Effect: ContextShift](connectionConfig: RabbitMQConnectionConfig, + executor: ExecutorService, + blocker: Blocker, + sslContext: Option[SSLContext], + connectionListener: ConnectionListener[F], + channelListener: ChannelListener[F], + consumerListener: ConsumerListener[F], + logger: ImplicitContextLogger[F]): Resource[F, ServerConnection] = { + import connectionConfig._ - val addresses = try { - hosts.map(Address.parseAddress) - } catch { - case NonFatal(e) => throw new IllegalArgumentException("Invalid format of hosts", e) - } + val factory = createConnectionFactory(addressResolverType) + val exceptionHandler = createExceptionHandler[F](connectionListener, channelListener, consumerListener) - logger.info(s"Connecting to ${hosts.mkString("[", ", ", "]")}, virtual host '$virtualHost'") - - try { - factory.newConnection(addresses.toArray, name) match { - case conn: ServerConnection => - conn.addRecoveryListener(exceptionHandler) - conn.addShutdownListener((cause: ShutdownSignalException) => connectionListener.onShutdown(conn, cause)) - connectionListener.onCreate(conn) - conn - // since we set `factory.setAutomaticRecoveryEnabled(true)` it should always be `Recoverable` (based on docs), so the exception will never be thrown - case _ => throw new IllegalStateException("Required Recoverable Connection") - } - } catch { - case NonFatal(e) => - connectionListener.onCreateFailure(e) - throw e + Resource.make { + setUpConnection(connectionConfig, factory, exceptionHandler, sslContext, executor, blocker) >> + parseAddresses(hosts) >>= { addresses => + logger.plainInfo(s"Connecting to ${hosts.mkString("[", ", ", "]")}, virtual host '$virtualHost'") >> { + blocker + .delay(factory.newConnection(addresses.toArray, name)) + .flatMap { + case conn: ServerConnection => + conn.addRecoveryListener(exceptionHandler) + conn.addShutdownListener { (cause: ShutdownSignalException) => + connectionListener.onShutdown(conn, cause).unsafeStartAndForget() + } + connectionListener.onCreate(conn).as { + conn + } + + // since we set `factory.setAutomaticRecoveryEnabled(true)` it should always be `Recoverable` (based on docs), so the exception will never be thrown + case _ => Sync[F].raiseError[ServerConnection](new IllegalStateException("Required Recoverable Connection")) + } + .handleErrorWith { case NonFatal(e) => connectionListener.onCreateFailure(e) >> Sync[F].raiseError[ServerConnection](e) } } } }(c => Sync[F].delay(c.close())) + } + + private def parseAddresses[F[_]: Effect](hosts: immutable.Seq[String]): F[Seq[Address]] = { + Sync[F].delay { + try { + hosts.map(Address.parseAddress) + } catch { + case NonFatal(e) => throw new IllegalArgumentException("Invalid format of hosts", e) + } + } + } private def createConnectionFactory[F[_]: Sync](addressResolverType: AddressResolverType): ConnectionFactory = { import 
com.avast.clients.rabbitmq.AddressResolverType._ @@ -168,11 +184,12 @@ object RabbitMQConnection extends StrictLogging { } } - private def setUpConnection(connectionConfig: RabbitMQConnectionConfig, - factory: ConnectionFactory, - exceptionHandler: ExceptionHandler, - sslContext: Option[SSLContext], - executor: ExecutorService): Unit = { + private def setUpConnection[F[_]: Sync: ContextShift](connectionConfig: RabbitMQConnectionConfig, + factory: ConnectionFactory, + exceptionHandler: ExceptionHandler, + sslContext: Option[SSLContext], + executor: ExecutorService, + blocker: Blocker): F[Unit] = blocker.delay { import connectionConfig._ factory.setVirtualHost(virtualHost) @@ -201,35 +218,35 @@ object RabbitMQConnection extends StrictLogging { } // scalastyle:off - private def createExceptionHandler(connectionListener: ConnectionListener, - channelListener: ChannelListener, - consumerListener: ConsumerListener): ExceptionHandler with RecoveryListener = + private def createExceptionHandler[F[_]: Effect](connectionListener: ConnectionListener[F], + channelListener: ChannelListener[F], + consumerListener: ConsumerListener[F]): ExceptionHandler with RecoveryListener = new ExceptionHandler with RecoveryListener { - override def handleReturnListenerException(channel: Channel, exception: Throwable): Unit = { - logger.info( - s"Return listener error on channel $channel (on connection ${channel.getConnection}, name ${channel.getConnection.getClientProvidedName})", - exception - ) + private val logger = ImplicitContextLogger.createLogger[F, ExceptionHandler] + + override def handleReturnListenerException(channel: Channel, exception: Throwable): Unit = startAndForget { + logger.plainInfo(exception)( + s"Return listener error on channel $channel (on connection ${channel.getConnection}, name ${channel.getConnection.getClientProvidedName})") } - override def handleConnectionRecoveryException(conn: Connection, exception: Throwable): Unit = { - logger.debug(s"Recovery error on connection $conn (name ${conn.getClientProvidedName})", exception) - connectionListener.onRecoveryFailure(conn, exception) + override def handleConnectionRecoveryException(conn: Connection, exception: Throwable): Unit = startAndForget { + logger.plainDebug(exception)(s"Recovery error on connection $conn (name ${conn.getClientProvidedName})") >> + connectionListener.onRecoveryFailure(conn, exception) } - override def handleBlockedListenerException(conn: Connection, exception: Throwable): Unit = { - logger.info(s"Recovery error on connection $conn (name ${conn.getClientProvidedName})", exception) + override def handleBlockedListenerException(conn: Connection, exception: Throwable): Unit = startAndForget { + logger.plainInfo(exception)(s"Blocked listener error on connection $conn (name ${conn.getClientProvidedName})") } - override def handleChannelRecoveryException(ch: Channel, exception: Throwable): Unit = { - logger.debug(s"Recovery error on channel $ch", exception) - channelListener.onRecoveryFailure(ch, exception) + override def handleChannelRecoveryException(ch: Channel, exception: Throwable): Unit = startAndForget { + logger.plainDebug(exception)(s"Recovery error on channel $ch") >> + channelListener.onRecoveryFailure(ch, exception) } - override def handleUnexpectedConnectionDriverException(conn: Connection, exception: Throwable): Unit = { + override def handleUnexpectedConnectionDriverException(conn: Connection, exception: Throwable): Unit = startAndForget { exception match { - case ioe: IOException => logger.info(s"RabbitMQ IO exception on $conn (name ${conn.getClientProvidedName})", ioe) - case e => logger.debug(s"RabbitMQ unexpected exception on $conn (name ${conn.getClientProvidedName})", e) + case ioe: IOException => logger.plainInfo(ioe)(s"RabbitMQ IO exception on $conn (name ${conn.getClientProvidedName})") + case e => logger.plainDebug(e)(s"RabbitMQ unexpected exception on $conn (name ${conn.getClientProvidedName})") } } @@ -237,43 +254,44 @@ object RabbitMQConnection extends StrictLogging { exception: Throwable, consumer: Consumer, consumerTag: String, - methodName: String): Unit = { - logger.debug(s"Consumer exception on channel $channel, consumer with tag '$consumerTag', method '$methodName'") + methodName: String): Unit = startAndForget { + logger.plainDebug(s"Consumer exception on channel $channel, consumer with tag '$consumerTag', method '$methodName'") >> { + val consumerName = consumer match { + case c: DefaultRabbitMQConsumer[_, _] => c.base.consumerName + case _ => "unknown" + } - val consumerName = consumer match { - case c: DefaultRabbitMQConsumer[_] => c.name - case _ => "unknown" + consumerListener.onError(consumer, consumerName, channel, exception) } - - consumerListener.onError(consumer, consumerName, channel, exception) } - override def handleTopologyRecoveryException(conn: Connection, ch: Channel, exception: TopologyRecoveryException): Unit = { - logger.debug(s"Topology recovery error on channel $ch (on connection $conn, name ${conn.getClientProvidedName})", exception) - channelListener.onRecoveryFailure(ch, exception) - } + override def handleTopologyRecoveryException(conn: Connection, ch: Channel, exception: TopologyRecoveryException): Unit = + startAndForget { + logger.plainDebug(exception)(s"Topology recovery error on channel $ch (on connection $conn, name ${conn.getClientProvidedName})") >> + channelListener.onRecoveryFailure(ch, exception) + } - override def handleConfirmListenerException(channel: Channel, exception: Throwable): Unit = { - logger.debug(s"Confirm listener error on channel $channel", exception) + override def handleConfirmListenerException(channel: Channel, exception: Throwable): Unit = startAndForget { + logger.plainDebug(exception)(s"Confirm listener error on channel $channel") } // recovery listener - override def handleRecovery(recoverable: Recoverable): Unit = { - logger.debug(s"Recovery completed on $recoverable") - - recoverable match { - case ch: ServerChannel => channelListener.onRecoveryCompleted(ch) - case conn: ServerConnection => connectionListener.onRecoveryCompleted(conn) + override def handleRecovery(recoverable: Recoverable): Unit = startAndForget { + logger.plainDebug(s"Recovery completed on $recoverable") >> { + recoverable match { + case ch: ServerChannel => channelListener.onRecoveryCompleted(ch) + case conn: ServerConnection => connectionListener.onRecoveryCompleted(conn) + } } } - override def handleRecoveryStarted(recoverable: Recoverable): Unit = { - logger.debug(s"Recovery started on $recoverable") - - recoverable match { - case ch: ServerChannel => channelListener.onRecoveryStarted(ch) - case conn: ServerConnection => connectionListener.onRecoveryStarted(conn) + override def handleRecoveryStarted(recoverable: Recoverable): Unit = startAndForget { + logger.plainDebug(s"Recovery started on $recoverable") >> { + recoverable match { + case ch: ServerChannel => channelListener.onRecoveryStarted(ch) + case conn: ServerConnection => connectionListener.onRecoveryStarted(conn) + } } } }
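With the listeners now passed as Options resolved against the F-aware defaults above, constructing a connection might look like the following sketch (illustrative only: connectionConfig is assumed to exist, and the monix Scheduler in scope is assumed to provide the ConcurrentEffect, Timer and ContextShift instances that make requires):

import java.util.concurrent.Executors
import cats.effect.Resource
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

val connection: Resource[Task, RabbitMQConnection[Task]] =
  RabbitMQConnection.make[Task](
    connectionConfig,                // a RabbitMQConnectionConfig, assumed to exist
    Executors.newCachedThreadPool(), // the blocking executor, lifted into a Blocker internally
    connectionListener = None,       // None falls back to ConnectionListener.default[Task]
    channelListener = None,          // None falls back to ChannelListener.default[Task]
    consumerListener = None          // None falls back to ConsumerListener.default[Task]
  )

diff --git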
a/core/src/main/scala/com/avast/clients/rabbitmq/RepublishStrategy.scala b/core/src/main/scala/com/avast/clients/rabbitmq/RepublishStrategy.scala index a31104f1..765a3c8b 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/RepublishStrategy.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/RepublishStrategy.scala @@ -1,61 +1,70 @@ package com.avast.clients.rabbitmq import cats.effect.{Blocker, ContextShift, Sync} +import cats.implicits.{catsSyntaxApplicativeError, catsSyntaxFlatMapOps, toFlatMapOps} +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger import com.rabbitmq.client.AMQP.BasicProperties -import com.typesafe.scalalogging.StrictLogging -import scala.language.higherKinds -import scala.util.control.NonFatal +import scala.util.{Left, Right} -trait RepublishStrategy { - def republish[F[_]: Sync: ContextShift](blocker: Blocker, channel: ServerChannel, consumerName: String)(originalQueueName: String, - messageId: String, - deliveryTag: Long, - properties: BasicProperties, - body: Array[Byte]): F[Unit] +trait RepublishStrategy[F[_]] { + def republish(blocker: Blocker, channel: ServerChannel, consumerName: String)(originalQueueName: String, + properties: BasicProperties, + rawBody: Bytes)(implicit dctx: DeliveryContext): F[Unit] } object RepublishStrategy { - case class CustomExchange(exchangeName: String) extends RepublishStrategy with StrictLogging { - def republish[F[_]: Sync: ContextShift](blocker: Blocker, channel: ServerChannel, consumerName: String)(originalQueueName: String, - messageId: String, - deliveryTag: Long, - properties: BasicProperties, - body: Array[Byte]): F[Unit] = { - blocker.delay { - try { - logger.debug { - s"[$consumerName] Republishing delivery (ID $messageId, deliveryTag $deliveryTag) to end of queue '$originalQueueName' through '$exchangeName'($originalQueueName)" + case class CustomExchange[F[_]: Sync: ContextShift](exchangeName: String) extends RepublishStrategy[F] { + private val logger = ImplicitContextLogger.createLogger[F, CustomExchange[F]] + + def republish(blocker: Blocker, channel: ServerChannel, consumerName: String)( + originalQueueName: String, + properties: BasicProperties, + rawBody: Bytes)(implicit dctx: DeliveryContext): F[Unit] = { + import dctx._ + + logger.debug { + s"[$consumerName] Republishing delivery ($messageId, $deliveryTag) to end of queue '$originalQueueName' through '$exchangeName'($originalQueueName)" + } >> + blocker + .delay { + if (!channel.isOpen) throw new IllegalStateException("Cannot republish delivery on closed channel") + channel.basicPublish(exchangeName, originalQueueName, properties, rawBody.toByteArray) + channel.basicAck(deliveryTag.value, false) + } + .attempt + .flatMap { + case Right(()) => Sync[F].unit + case Left(e) => logger.warn(e)(s"[$consumerName] Error while republishing the delivery $messageId") } - if (!channel.isOpen) throw new IllegalStateException("Cannot republish delivery on closed channel") - channel.basicPublish(exchangeName, originalQueueName, properties, body) - channel.basicAck(deliveryTag, false) - } catch { - case NonFatal(e) => logger.warn(s"[$consumerName] Error while republishing the delivery", e) - } - } } } - case object DefaultExchange extends RepublishStrategy with StrictLogging { - def republish[F[_]: Sync: ContextShift](blocker: Blocker, channel: ServerChannel, consumerName: String)(originalQueueName: String, - messageId: String, - deliveryTag: Long, - properties: BasicProperties, - body: Array[Byte]): F[Unit] = { - 
blocker.delay { - try { - logger.debug { - s"[$consumerName] Republishing delivery (ID $messageId, deliveryTag $deliveryTag) to end of queue '$originalQueueName' (through default exchange)" + case class DefaultExchange[F[_]: Sync: ContextShift]() extends RepublishStrategy[F] { + private val logger = ImplicitContextLogger.createLogger[F, DefaultExchange[F]] + + def republish(blocker: Blocker, channel: ServerChannel, consumerName: String)( + originalQueueName: String, + properties: BasicProperties, + rawBody: Bytes)(implicit dctx: DeliveryContext): F[Unit] = { + import dctx._ + + logger.debug { + s"[$consumerName] Republishing delivery ($messageId, $deliveryTag) to end of queue '$originalQueueName' (through default exchange)" + } >> + blocker + .delay { + if (!channel.isOpen) throw new IllegalStateException("Cannot republish delivery on closed channel") + channel.basicPublish("", originalQueueName, properties, rawBody.toByteArray) + channel.basicAck(deliveryTag.value, false) + } + .attempt + .flatMap { + case Right(()) => Sync[F].unit + case Left(e) => logger.warn(e)(s"[$consumerName] Error while republishing the delivery $messageId") } - if (!channel.isOpen) throw new IllegalStateException("Cannot republish delivery on closed channel") - channel.basicPublish("", originalQueueName, properties, body) - channel.basicAck(deliveryTag, false) - } catch { - case NonFatal(e) => logger.warn(s"[$consumerName] Error while republishing the delivery", e) - } - } } } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala b/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala index 0a6d9b97..eab68980 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala @@ -1,5 +1,6 @@ package com.avast.clients.rabbitmq +import cats.effect.{ContextShift, Sync} import com.avast.clients.rabbitmq.api.DeliveryResult import com.rabbitmq.client.RecoveryDelayHandler import org.slf4j.event.Level @@ -32,7 +33,8 @@ final case class ConsumerConfig(name: String, timeoutLogLevel: Level = Level.WARN, prefetchCount: Int = 100, declare: Option[AutoDeclareQueueConfig] = None, - consumerTag: String = "Default") + consumerTag: String = "Default", + poisonedMessageHandling: Option[PoisonedMessageHandlingConfig] = None) final case class StreamingConsumerConfig(name: String, queueName: String, @@ -43,13 +45,14 @@ final case class StreamingConsumerConfig(name: String, prefetchCount: Int = 100, queueBufferSize: Int = 100, declare: Option[AutoDeclareQueueConfig] = None, - consumerTag: String = "Default") + consumerTag: String = "Default", + poisonedMessageHandling: Option[PoisonedMessageHandlingConfig] = None) final case class PullConsumerConfig(name: String, queueName: String, bindings: immutable.Seq[AutoBindQueueConfig], - failureAction: DeliveryResult = DeliveryResult.Republish(), - declare: Option[AutoDeclareQueueConfig] = None) + declare: Option[AutoDeclareQueueConfig] = None, + poisonedMessageHandling: Option[PoisonedMessageHandlingConfig] = None) final case class AutoDeclareQueueConfig(enabled: Boolean = false, durable: Boolean = true, @@ -106,6 +109,20 @@ final case class BindExchangeConfig(sourceExchangeName: String, routingKeys: immutable.Seq[String], arguments: BindArgumentsConfig = BindArgumentsConfig()) +sealed trait PoisonedMessageHandlingConfig + +final case class DeadQueueProducerConfig(name: String, + exchange: String, + routingKey: String, + declare: Option[AutoDeclareExchangeConfig] = None, 
+ reportUnroutable: Boolean = true, + properties: ProducerPropertiesConfig = ProducerPropertiesConfig()) + +case object NoOpPoisonedMessageHandling extends PoisonedMessageHandlingConfig +final case class LoggingPoisonedMessageHandling(maxAttempts: Int) extends PoisonedMessageHandlingConfig +final case class DeadQueuePoisonedMessageHandling(maxAttempts: Int, deadQueueProducer: DeadQueueProducerConfig) + extends PoisonedMessageHandlingConfig + sealed trait AddressResolverType object AddressResolverType { case object Default extends AddressResolverType @@ -134,16 +151,16 @@ object ExchangeType { } trait RepublishStrategyConfig { - def toRepublishStrategy: RepublishStrategy + def toRepublishStrategy[F[_]: Sync: ContextShift]: RepublishStrategy[F] } object RepublishStrategyConfig { case class CustomExchange(exchangeName: String, exchangeDeclare: Boolean = true, exchangeAutoBind: Boolean = true) extends RepublishStrategyConfig { - override def toRepublishStrategy: RepublishStrategy = RepublishStrategy.CustomExchange(exchangeName) + override def toRepublishStrategy[F[_]: Sync: ContextShift]: RepublishStrategy[F] = RepublishStrategy.CustomExchange(exchangeName) } case object DefaultExchange extends RepublishStrategyConfig { - override def toRepublishStrategy: RepublishStrategy = RepublishStrategy.DefaultExchange + override def toRepublishStrategy[F[_]: Sync: ContextShift]: RepublishStrategy[F] = RepublishStrategy.DefaultExchange[F]() } } diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/logging/ImplicitContextLogger.scala b/core/src/main/scala/com/avast/clients/rabbitmq/logging/ImplicitContextLogger.scala new file mode 100644 index 00000000..bbe17111 --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/logging/ImplicitContextLogger.scala @@ -0,0 +1,48 @@ +package com.avast.clients.rabbitmq.logging + +import cats.effect.Sync + +import scala.reflect.ClassTag + +private[rabbitmq] class ImplicitContextLogger[F[_]](private val contextLessLogger: GenericPlainLogger[F]) { + + // contextLessLogger.info() + def info[Ctx <: LoggingContext](msg: => String)(implicit ctx: Ctx): F[Unit] = contextLessLogger.info(ctx.asContextMap)(msg) + def info[Ctx <: LoggingContext](t: Throwable)(msg: => String)(implicit ctx: Ctx): F[Unit] = + contextLessLogger.info(ctx.asContextMap, t)(msg) + def plainInfo(msg: => String): F[Unit] = contextLessLogger.info(msg) + def plainInfo(t: Throwable)(msg: => String): F[Unit] = contextLessLogger.info(t)(msg) + + // contextLessLogger.warn() + def warn[Ctx <: LoggingContext](msg: => String)(implicit ctx: Ctx): F[Unit] = contextLessLogger.warn(ctx.asContextMap)(msg) + def warn[Ctx <: LoggingContext](t: Throwable)(msg: => String)(implicit ctx: Ctx): F[Unit] = + contextLessLogger.warn(ctx.asContextMap, t)(msg) + def plainWarn(msg: => String): F[Unit] = contextLessLogger.warn(msg) + def plainWarn(t: Throwable)(msg: => String): F[Unit] = contextLessLogger.warn(t)(msg) + + // contextLessLogger.error() + def error[Ctx <: LoggingContext](msg: => String)(implicit ctx: Ctx): F[Unit] = contextLessLogger.error(ctx.asContextMap)(msg) + def error[Ctx <: LoggingContext](t: Throwable)(msg: => String)(implicit ctx: Ctx): F[Unit] = + contextLessLogger.error(ctx.asContextMap, t)(msg) + def plainError(msg: => String): F[Unit] = contextLessLogger.error(msg) + def plainError(t: Throwable)(msg: => String): F[Unit] = contextLessLogger.error(t)(msg) + + // contextLessLogger.debug() + def debug[Ctx <: LoggingContext](msg: => String)(implicit ctx: Ctx): F[Unit] = 
contextLessLogger.debug(ctx.asContextMap)(msg) + def debug[Ctx <: LoggingContext](t: Throwable)(msg: => String)(implicit ctx: Ctx): F[Unit] = + contextLessLogger.debug(ctx.asContextMap, t)(msg) + def plainDebug(msg: => String): F[Unit] = contextLessLogger.debug(msg) + def plainDebug(t: Throwable)(msg: => String): F[Unit] = contextLessLogger.debug(t)(msg) + + // contextLessLogger.trace() + def trace[Ctx <: LoggingContext](msg: => String)(implicit ctx: Ctx): F[Unit] = contextLessLogger.trace(ctx.asContextMap)(msg) + def trace[Ctx <: LoggingContext](t: Throwable)(msg: => String)(implicit ctx: Ctx): F[Unit] = + contextLessLogger.trace(ctx.asContextMap, t)(msg) + def plainTrace(msg: => String): F[Unit] = contextLessLogger.trace(msg) + def plainTrace(t: Throwable)(msg: => String): F[Unit] = contextLessLogger.trace(t)(msg) +} + +private[rabbitmq] object ImplicitContextLogger { + def createLogger[F[_]: Sync, A](implicit ct: ClassTag[A]): ImplicitContextLogger[F] = + new ImplicitContextLogger(createPlainLogger[F, A]) +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/logging/LoggingContext.scala b/core/src/main/scala/com/avast/clients/rabbitmq/logging/LoggingContext.scala new file mode 100644 index 00000000..e9aafe4d --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/logging/LoggingContext.scala @@ -0,0 +1,5 @@ +package com.avast.clients.rabbitmq.logging + +private[rabbitmq] trait LoggingContext { + def asContextMap: Map[String, String] +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/logging/package.scala b/core/src/main/scala/com/avast/clients/rabbitmq/logging/package.scala new file mode 100644 index 00000000..914026e0 --- /dev/null +++ b/core/src/main/scala/com/avast/clients/rabbitmq/logging/package.scala @@ -0,0 +1,14 @@ +package com.avast.clients.rabbitmq + +import cats.effect.Sync +import org.typelevel.log4cats.SelfAwareStructuredLogger +import org.typelevel.log4cats.slf4j.Slf4jLogger + +import scala.reflect.ClassTag + +package object logging { + type GenericPlainLogger[F[_]] = SelfAwareStructuredLogger[F] + + private[rabbitmq] def createPlainLogger[F[_]: Sync, A](implicit ct: ClassTag[A]): GenericPlainLogger[F] = + Slf4jLogger.getLoggerFromClass(ct.runtimeClass) +} diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/rabbitmq.scala b/core/src/main/scala/com/avast/clients/rabbitmq/rabbitmq.scala index 6dd3a060..92912151 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/rabbitmq.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/rabbitmq.scala @@ -1,7 +1,7 @@ package com.avast.clients -import cats.effect.Sync import cats.effect.concurrent.Ref +import cats.effect.{Effect, Sync} import cats.implicits.catsSyntaxFlatMapOps import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer.{FederationOriginalRoutingKeyHeaderName, RepublishOriginalRoutingKeyHeaderName} @@ -10,7 +10,8 @@ import com.rabbitmq.client.{RecoverableChannel, RecoverableConnection} import fs2.RaiseThrowable import java.util.concurrent.Executors -import scala.language.{higherKinds, implicitConversions} +import scala.concurrent.Future +import scala.language.implicitConversions package object rabbitmq { private[rabbitmq] type ServerConnection = RecoverableConnection @@ -67,4 +68,15 @@ package object rabbitmq { } } + private[rabbitmq] implicit class RunningF[F[_], A](val f: F[A]) extends AnyVal { + def unsafeStartAndForget()(implicit F: Effect[F]): Unit = { + F.toIO(f).unsafeToFuture() + () + } + } + + private[rabbitmq] def 
startAndForget[F[_]: Effect](f: F[Unit]): Unit = { + f.unsafeStartAndForget + } + } diff --git a/core/src/test/resources/application.conf b/core/src/test/resources/application.conf index 567901dd..1897c310 100644 --- a/core/src/test/resources/application.conf +++ b/core/src/test/resources/application.conf @@ -28,6 +28,8 @@ myConfig { queueName = "QUEUE1" + prefetchCount = 500 + processTimeout = 500 ms declare { @@ -111,6 +113,7 @@ myConfig { } prefetchCount = 500 + queueBufferSize = 100 bindings = [ { @@ -150,10 +153,154 @@ myConfig { enabled = true } + prefetchCount = 100 + queueBufferSize = 10 + + processTimeout = 500 ms + + bindings = [ + { + routingKeys = ["test"] + + exchange { + name = "EXCHANGE1" + + declare { + enabled = true + + type = "direct" + } + } + }, { + routingKeys = ["test2"] + + exchange { + name = "EXCHANGE2" + + declare { + enabled = true + + type = "direct" + } + } + } + ] + } + + testingWithPoisonedMessageHandler { + name = "Testing" + + queueName = "QUEUE1" + + processTimeout = 500 ms + prefetchCount = 500 + declare { + enabled = true + } + + bindings = [ + { + routingKeys = ["test"] + + exchange { + name = "EXCHANGE1" + + declare { + enabled = true + + type = "direct" + } + } + } + ] + + poisonedMessageHandling { + type = "deadQueue" + + maxAttempts = 2 + + deadQueueProducer { + routingKey = "dead" + name = "DeadQueueProducer" + exchange = "EXCHANGE3" + declare { + enabled = true + type = "direct" + } + } + } + } + + testingPullWithPoisonedMessageHandler { + name = "Testing" + + queueName = "QUEUE1" + + declare { + enabled = true + } + + bindings = [ + { + routingKeys = ["test"] + + exchange { + name = "EXCHANGE1" + + declare { + enabled = true + + type = "direct" + } + } + }, { + routingKeys = ["test2"] + + exchange { + name = "EXCHANGE2" + + declare { + enabled = true + + type = "direct" + } + } + } + ] + + poisonedMessageHandling { + type = "deadQueue" + + maxAttempts = 2 + + deadQueueProducer { + routingKey = "dead" + name = "DeadQueueProducer" + exchange = "EXCHANGE3" + declare { + enabled = true + type = "direct" + } + } + } + } + + testingStreamingWithPoisonedMessageHandler { + name = "Testing" + + queueName = "QUEUE1" + + declare { + enabled = true + } + processTimeout = 500 ms + prefetchCount = 500 + queueBufferSize = 100 + bindings = [ { routingKeys = ["test"] @@ -181,6 +328,22 @@ myConfig { } } ] + + poisonedMessageHandling { + type = "deadQueue" + + maxAttempts = 2 + + deadQueueProducer { + routingKey = "dead" + name = "DeadQueueProducer" + exchange = "EXCHANGE3" + declare { + enabled = true + type = "direct" + } + } + } } } @@ -245,12 +408,12 @@ myConfig { declareQueue { name = "QUEUE2" - arguments = {"x-max-length": 10000} + arguments = {"x-max-length": 1000000} // This just to test it accepts some arguments. 1M should be big enough for all tests (10k wasn't :-D). 
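      // QUEUE2 also plays the dead queue for the poisonedMessageHandling configs above: the "dead"
      // routing key added to the binding below matches the DeadQueueProducer, which publishes
      // rejected-as-poisoned messages to EXCHANGE3 with routingKey = "dead".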
} bindQueue { queueName = "QUEUE2" - routingKeys = ["test"] + routingKeys = ["test", "dead"] exchangeName = "EXCHANGE3" } } diff --git a/core/src/test/resources/logback.xml b/core/src/test/resources/logback.xml index 8cb1efb2..e7504937 100644 --- a/core/src/test/resources/logback.xml +++ b/core/src/test/resources/logback.xml @@ -8,7 +8,7 @@ - + diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/LiveTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala similarity index 51% rename from core/src/test/scala/com/avast/clients/rabbitmq/LiveTest.scala rename to core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala index 38e8062f..591d1c86 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/LiveTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala @@ -5,8 +5,7 @@ import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.api.DeliveryResult._ import com.avast.clients.rabbitmq.api._ import com.avast.clients.rabbitmq.extras.format.JsonDeliveryConverter -import com.avast.clients.rabbitmq.extras.{PoisonedMessageHandler, StreamingPoisonedMessageHandler} -import com.avast.metrics.scalaapi.Monitor +import com.avast.metrics.scalaeffectapi.Monitor import com.typesafe.config._ import monix.eval.Task import monix.execution.Scheduler @@ -20,13 +19,16 @@ import scala.concurrent.duration._ import scala.jdk.CollectionConverters._ import scala.util.Random -class LiveTest extends TestBase with ScalaFutures { +class BasicLiveTest extends TestBase with ScalaFutures { import pureconfig._ def randomString(length: Int): String = { Random.alphanumeric.take(length).mkString("") } + // default, is overridden in some tests + private implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.RandomNew + private implicit val p: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds)) private lazy val testHelper = new TestHelper(System.getProperty("rabbit.host", System.getenv("rabbit.host")), @@ -57,12 +59,6 @@ class LiveTest extends TestBase with ScalaFutures { .withValue("consumers.testing.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) .withValue("consumers.testingPull.queueName", ConfigValueFactory.fromAnyRef(queueName1)) .withValue("consumers.testingPull.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) - .withValue("consumers.testingStreaming.queueName", ConfigValueFactory.fromAnyRef(queueName1)) - .withValue("consumers.testingStreaming.queueBufferSize", ConfigValueFactory.fromAnyRef(200)) - .withValue("consumers.testingStreaming.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) - .withValue("consumers.testingStreamingWithTimeout.queueName", ConfigValueFactory.fromAnyRef(queueName1)) - .withValue("consumers.testingStreamingWithTimeout.queueBufferSize", ConfigValueFactory.fromAnyRef(200)) - .withValue("consumers.testingStreamingWithTimeout.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) .withValue("producers.testing.exchange", ConfigValueFactory.fromAnyRef(exchange1)) .withValue("producers.testing2.exchange", ConfigValueFactory.fromAnyRef(exchange2)) .withValue("producers.testing3.exchange", ConfigValueFactory.fromAnyRef(exchange4)) @@ -77,7 +73,10 @@ class LiveTest extends TestBase with ScalaFutures { val ex: ExecutorService = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool()) - implicit val sched: Scheduler = Scheduler(Executors.newCachedThreadPool()) + 
implicit val sched: Scheduler = Scheduler( + Executors.newScheduledThreadPool(4), + ExecutionContext.fromExecutor(new ForkJoinPool()) + ) } test("basic") { @@ -116,7 +115,7 @@ class LiveTest extends TestBase with ScalaFutures { import c._ RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val count = Random.nextInt(500) + 500 // random 500 - 1000 messages + val count = Random.nextInt(2000) + 2000 // 2-4k logger.info(s"Sending $count messages") @@ -125,18 +124,15 @@ class LiveTest extends TestBase with ScalaFutures { val d = new AtomicInteger(0) val cons = rabbitConnection.newConsumer("testing", Monitor.noOp()) { _: Delivery[Bytes] => - Task { - val n = d.incrementAndGet() + Task(d.incrementAndGet()).flatMap { n => + Task.sleep((if (n % 5 == 0) 150 else 0).millis) >> + Task { + latch.countDown() - Thread.sleep(if (n % 2 == 0) 300 else 0) - latch.countDown() - - if (n < (count - 100) || n > count) Ack - else { - if (n < (count - 50)) Retry else Republish() - } + if (n < (count - 100) || n > count) Ack else Retry - // ^ example: 750 messages in total => 650 * Ack, 50 * Retry, 50 * Republish => processing 850 (== +100) messages in total + // ^ example: 750 messages in total => 650 * Ack, 100 * Retry => processing 850 (== +100) messages in total + } } } @@ -147,13 +143,15 @@ class LiveTest extends TestBase with ScalaFutures { } // it takes some time before the stats appear... :-| - eventually(timeout(Span(3, Seconds)), interval(Span(0.1, Seconds))) { + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) } - eventually(timeout(Span(3, Seconds)), interval(Span(0.1, Seconds))) { + eventually(timeout(Span(120, Seconds)), interval(Span(2, Seconds))) { + val inQueue = testHelper.queue.getMessagesCount(queueName1) + println(s"In QUEUE COUNT: $inQueue") assertResult(true)(latch.await(1000, TimeUnit.MILLISECONDS)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + assertResult(0)(inQueue) } } } @@ -209,7 +207,7 @@ class LiveTest extends TestBase with ScalaFutures { sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } - eventually(timeout(Span(3, Seconds)), interval(Span(0.25, Seconds))) { + eventually(timeout(Span(3, Seconds)), interval(Span(0.5, Seconds))) { assert(cnt.get() >= 40) assert(testHelper.queue.getMessagesCount(queueName1) <= 20) } @@ -222,7 +220,7 @@ class LiveTest extends TestBase with ScalaFutures { val c = createConfig() import c._ - implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) + implicit val cs: ContextShift[IO] = IO.contextShift(TestBase.testBlockingScheduler) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) RabbitMQConnection.fromConfig[IO](config, ex).withResource { rabbitConnection => @@ -237,10 +235,10 @@ class LiveTest extends TestBase with ScalaFutures { cons.withResource { _ => rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).unsafeRunSync() + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } - eventually(timeout(Span(5, Seconds)), interval(Span(0.25, Seconds))) { + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { assert(cnt.get() >= 40) assert(testHelper.queue.getMessagesCount(queueName1) <= 20) } @@ -259,7 +257,7 @@ class LiveTest extends TestBase with ScalaFutures { |--(test) --> EXCHANGE1 --(test)--> QUEUE1 */ - 
implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) + implicit val cs: ContextShift[IO] = IO.contextShift(TestBase.testBlockingScheduler) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) RabbitMQConnection.fromConfig[IO](config, ex).withResource { rabbitConnection => @@ -282,13 +280,13 @@ class LiveTest extends TestBase with ScalaFutures { _ <- rabbitConnection.bindQueue("bindQueue") } yield ()).unsafeRunSync() - assertResult(Map("x-max-length" -> 10000))(testHelper.queue.getArguments(queueName2)) + assertResult(Map("x-max-length" -> 1000000))(testHelper.queue.getArguments(queueName2)) assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) assertResult(0)(testHelper.queue.getMessagesCount(queueName2)) for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).unsafeRunSync() + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } eventually(timeout(Span(2, Seconds)), interval(Span(200, Milliseconds))) { @@ -302,124 +300,11 @@ class LiveTest extends TestBase with ScalaFutures { } } - test("PoisonedMessageHandler") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val poisoned = new AtomicInteger(0) - val processed = new AtomicInteger(0) - - val h = PoisonedMessageHandler.withCustomPoisonedAction[Task, Bytes](2) { _: Delivery[Bytes] => - Task { - processed.incrementAndGet() - DeliveryResult.Republish() - } - } { _: Delivery[Bytes] => - Task { - poisoned.incrementAndGet() - () - } - } - - rabbitConnection.newConsumer("testing", Monitor.noOp())(h).withResource { _ => - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - eventually(timeout(Span(2, Seconds)), interval(Span(0.25, Seconds))) { - assertResult(20)(processed.get()) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - assertResult(10)(poisoned.get()) - } - } - } - } - } - - test("PoisonedMessageHandler streaming") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val poisoned = new AtomicInteger(0) - val processed = new AtomicInteger(0) - - val pmh = StreamingPoisonedMessageHandler.piped[Task, Bytes](2) - - rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => - cons.deliveryStream - .through(pmh) - .evalMap { d => - Task { processed.incrementAndGet() } >> - d.handle(DeliveryResult.Republish()) - } - .compile - .drain - .runToFuture - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - eventually(timeout(Span(2, Seconds)), interval(Span(0.25, Seconds))) { - assertResult(20)(processed.get()) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) -// assertResult(10)(poisoned.get()) - } - } - } - } - } - - test("PoisonedMessageHandler streaming custom") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val poisoned = new AtomicInteger(0) - val processed = new AtomicInteger(0) - - val pmh = StreamingPoisonedMessageHandler.pipedWithCustomPoisonedAction[Task, Bytes](2) { _ => - Task { - logger.debug("Poisoned received!") - poisoned.incrementAndGet() - } - } - - 
rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => - cons.deliveryStream - .through(pmh) - .evalMap { d => - Task { processed.incrementAndGet() } >> - d.handle(DeliveryResult.Republish()) - } - .compile - .drain - .runToFuture - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - eventually(timeout(Span(2, Seconds)), interval(Span(0.25, Seconds))) { - assertResult(20)(processed.get()) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - assertResult(10)(poisoned.get()) - } - } - } - } - } - test("pull consumer") { val c = createConfig() import c._ - implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) + implicit val cs: ContextShift[IO] = IO.contextShift(TestBase.testBlockingScheduler) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) RabbitMQConnection.fromConfig[IO](config, ex).withResource { rabbitConnection => @@ -428,7 +313,7 @@ class LiveTest extends TestBase with ScalaFutures { cons.withResource { consumer => rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => for (_ <- 1 to 10) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).unsafeRunSync() + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } eventually(timeout = timeout(Span(5, Seconds))) { @@ -436,8 +321,8 @@ class LiveTest extends TestBase with ScalaFutures { } for (_ <- 1 to 3) { - val PullResult.Ok(dwh) = consumer.pull().unsafeRunSync() - dwh.handle(DeliveryResult.Ack).unsafeRunSync() + val PullResult.Ok(dwh) = consumer.pull().await + dwh.handle(DeliveryResult.Ack).await } eventually(timeout = timeout(Span(5, Seconds))) { @@ -445,8 +330,8 @@ class LiveTest extends TestBase with ScalaFutures { } for (_ <- 1 to 7) { - val PullResult.Ok(dwh) = consumer.pull().unsafeRunSync() - dwh.handle(DeliveryResult.Ack).unsafeRunSync() + val PullResult.Ok(dwh) = consumer.pull().await + dwh.handle(DeliveryResult.Ack).await } eventually(timeout = timeout(Span(5, Seconds))) { @@ -454,220 +339,7 @@ class LiveTest extends TestBase with ScalaFutures { } for (_ <- 1 to 10) { - assertResult(PullResult.EmptyQueue)(consumer.pull().unsafeRunSync()) - } - } - } - } - } - - test("streaming consumer") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val count = Random.nextInt(50000) + 50000 // random 50 - 100k messages - - logger.info(s"Sending $count messages") - - val latch = new CountDownLatch(count + 10000) // explanation below - - val d = new AtomicInteger(0) - - rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => - val stream = cons.deliveryStream - .mapAsyncUnordered(50) { del => - Task.delay(d.incrementAndGet()).flatMap { n => - del.handle { - latch.countDown() - - if (n <= (count - 10000) || n > count) Ack - else { - if (n <= (count - 5000)) Retry else Republish() - } - - // ^ example: 100000 messages in total => 6500 * Ack, 5000 * Retry, 5000 * Republish => processing 110000 (== +10000) messages in total - } - } - } - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to count) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - // it takes some time before the stats appear... 
:-| - eventually(timeout(Span(50, Seconds)), interval(Span(1, Seconds))) { - assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) - } - - sched.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream - - eventually(timeout(Span(4, Minutes)), interval(Span(1, Seconds))) { - println("D: " + d.get()) - assertResult(count + 10000)(d.get()) - println("LATCH: " + latch.getCount) - assertResult(true)(latch.await(1000, TimeUnit.MILLISECONDS)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - } - } - } - } - } - - test("streaming consumers to single queue") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val count = Random.nextInt(10000) + 10000 // random 10k - 20k messages - - logger.info(s"Sending $count messages") - - val latch = new CountDownLatch(count) - - def toStream(cons1: RabbitMQStreamingConsumer[Task, Bytes], count: Int, d: AtomicInteger): fs2.Stream[Task, StreamedResult] = { - cons1.deliveryStream - .mapAsyncUnordered(20) { del => - Task.delay(d.incrementAndGet()).flatMap { n => - Task.sleep((if (n % 500 == 0) Random.nextInt(100) else 0).millis) >> // random slowdown 0-100 ms for every 500th message - del.handle { - latch.countDown() - - Ack - } - } - } - } - - rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons1 => - rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons2 => - val d1 = new AtomicInteger(0) - val d2 = new AtomicInteger(0) - - val stream1 = toStream(cons1, count, d1) - val stream2 = toStream(cons2, count, d2) - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to count) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - // it takes some time before the stats appear... 
:-| - eventually(timeout(Span(50, Seconds)), interval(Span(1, Seconds))) { - assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) - } - - sched.execute(() => stream1.compile.drain.runSyncUnsafe()) // run the stream - sched.execute(() => stream2.compile.drain.runSyncUnsafe()) // run the stream - - eventually(timeout(Span(5, Minutes)), interval(Span(1, Seconds))) { - println(s"D: ${d1.get}/${d2.get()}") - assertResult(count)(d1.get() + d2.get()) - assert(d1.get() > 0) - assert(d2.get() > 0) - println("LATCH: " + latch.getCount) - assertResult(true)(latch.await(1000, TimeUnit.MILLISECONDS)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - } - } - } - } - } - } - - test("streaming consumer stream can be manually restarted") { - for (_ <- 1 to 5) { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val count = Random.nextInt(50000) + 50000 // random 50k - 100k messages - - val nth = 150 - - logger.info(s"Sending $count messages") - - val d = new AtomicInteger(0) - - rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => - def stream: fs2.Stream[Task, StreamedResult] = - cons.deliveryStream - .evalMap { del => - Task - .delay(d.incrementAndGet()) - .flatMap { n => - if (n % nth != 0) del.handle(Ack) - else { - Task.raiseError(new RuntimeException(s"My failure $n")) - } - // ^^ cause failure for every nth message - } - } - .handleErrorWith { e => - logger.info(s"Stream has failed: ${e.getMessage}") - stream - } - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to count) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - // it takes some time before the stats appear... :-| - eventually(timeout(Span(50, Seconds)), interval(Span(0.5, Seconds))) { - assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) - } - - sched.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream - - eventually(timeout(Span(5, Minutes)), interval(Span(1, Seconds))) { - println("D: " + d.get()) - assert(d.get() > count) // can't say exact number, number of redeliveries is unpredictable - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - } - } - } - } - } - } - - test("streaming consumer timeouts") { - val c = createConfig() - import c._ - - RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => - val count = 100 - - logger.info(s"Sending $count messages") - - val d = new AtomicInteger(0) - - rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", Monitor.noOp()).withResource { cons => - val stream = cons.deliveryStream - .mapAsyncUnordered(50) { del => - Task.delay(d.incrementAndGet()) >> - del - .handle(Ack) - .delayExecution(800.millis) - } - - rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - for (_ <- 1 to count) { - sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await - } - - // it takes some time before the stats appear... 
:-| - eventually(timeout(Span(50, Seconds)), interval(Span(1, Seconds))) { - assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) - } - - sched.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream - - eventually(timeout(Span(20, Seconds)), interval(Span(1, Seconds))) { - println("D: " + d.get()) - assert(d.get() > count + 200) // more than sent messages - assert(testHelper.exchange.getPublishedCount(exchange5) > 0) + assertResult(PullResult.EmptyQueue)(consumer.pull().await) } } } @@ -682,7 +354,7 @@ class LiveTest extends TestBase with ScalaFutures { implicit val conv: DeliveryConverter[Abc] = JsonDeliveryConverter.derive[Abc]() - implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) + implicit val cs: ContextShift[IO] = IO.contextShift(TestBase.testBlockingScheduler) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) RabbitMQConnection.fromConfig[IO](config, ex).withResource { rabbitConnection => @@ -709,7 +381,7 @@ class LiveTest extends TestBase with ScalaFutures { cons.withResource { _ => rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => - sender.send("test", Bytes.copyFromUtf8(randomString(10))).unsafeRunSync() + sender.send("test", Bytes.copyFromUtf8(randomString(10))).await eventually(timeout = timeout(Span(5, Seconds))) { assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) @@ -755,4 +427,38 @@ class LiveTest extends TestBase with ScalaFutures { } } } + + test("propagates correlation ID") { + val c = createConfig() + import c._ + + val messageCount = 10 + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val processed = new AtomicInteger(0) + + val cons = rabbitConnection.newConsumer[Bytes]("testing", Monitor.noOp()) { + case Delivery.Ok(body, properties, _) => + assertResult(Some(body.toStringUtf8))(properties.correlationId) + processed.incrementAndGet() + Task.now(DeliveryResult.Ack) + + case _ => fail("malformed") + } + + cons.withResource { _ => + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (i <- 1 to messageCount) { + implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.Fixed(s"cid$i") + sender.send("test", Bytes.copyFromUtf8(s"cid$i")).await + } + + eventually { + assertResult(messageCount)(processed.get()) + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + } + } + } + } + } } diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumerTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumerTest.scala index 4da414f2..e3cc7d1b 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumerTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConsumerTest.scala @@ -1,23 +1,27 @@ package com.avast.clients.rabbitmq -import java.time.Duration -import java.util.UUID - +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer.CorrelationIdHeaderName import com.avast.clients.rabbitmq.RabbitMQConnection.DefaultListeners import com.avast.clients.rabbitmq.api.DeliveryResult -import com.avast.metrics.scalaapi._ +import com.avast.clients.rabbitmq.api.DeliveryResult.Republish +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi._ import com.rabbitmq.client.AMQP.BasicProperties import com.rabbitmq.client.Envelope import com.rabbitmq.client.impl.recovery.AutorecoveringChannel 
import monix.eval.Task import monix.execution.Scheduler.Implicits.global -import org.mockito.{ArgumentCaptor, Matchers} import org.mockito.Mockito._ +import org.mockito.{ArgumentCaptor, Matchers} import org.scalatest.time.{Seconds, Span} +import org.slf4j.event.Level -import scala.jdk.CollectionConverters._ +import java.time.Duration +import java.util.UUID import scala.collection.immutable -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.duration.{DurationInt, Duration => ScalaDuration} +import scala.jdk.CollectionConverters._ import scala.util._ class DefaultRabbitMQConsumerTest extends TestBase { @@ -32,23 +36,12 @@ class DefaultRabbitMQConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) - val consumer = new DefaultRabbitMQConsumer[Task]( - "test", - channel, - "queueName", - connectionInfo, - Monitor.noOp, - DeliveryResult.Reject, - DefaultListeners.DefaultConsumerListener, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - )({ delivery => + val consumer = newConsumer(channel)({ delivery => assertResult(Some(messageId))(delivery.properties.messageId) Task.now(DeliveryResult.Ack) @@ -73,23 +66,12 @@ class DefaultRabbitMQConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) - val consumer = new DefaultRabbitMQConsumer[Task]( - "test", - channel, - "queueName", - connectionInfo, - Monitor.noOp, - DeliveryResult.Reject, - DefaultListeners.DefaultConsumerListener, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - )({ delivery => + val consumer = newConsumer(channel)({ delivery => assertResult(Some(messageId))(delivery.properties.messageId) Task.now(DeliveryResult.Retry) @@ -114,23 +96,12 @@ class DefaultRabbitMQConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) - val consumer = new DefaultRabbitMQConsumer[Task]( - "test", - channel, - "queueName", - connectionInfo, - Monitor.noOp, - DeliveryResult.Reject, - DefaultListeners.DefaultConsumerListener, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - )({ delivery => + val consumer = newConsumer(channel)({ delivery => assertResult(Some(messageId))(delivery.properties.messageId) Task.now(DeliveryResult.Reject) @@ -161,17 +132,7 @@ class DefaultRabbitMQConsumerTest extends TestBase { val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) - val consumer = new DefaultRabbitMQConsumer[Task]( - "test", - channel, - "queueName", - connectionInfo, - Monitor.noOp, - DeliveryResult.Reject, - DefaultListeners.DefaultConsumerListener, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - )({ delivery => + val consumer = 
newConsumer(channel)({ delivery =>
       assertResult(Some(messageId))(delivery.properties.messageId)
 
       Task.now(DeliveryResult.Republish())
@@ -198,23 +159,12 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     val envelope = mock[Envelope]
     when(envelope.getDeliveryTag).thenReturn(deliveryTag)
 
-    val properties = mock[BasicProperties]
-    when(properties.getMessageId).thenReturn(messageId)
+    val properties = new BasicProperties.Builder().messageId(messageId).build()
 
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val consumer = new DefaultRabbitMQConsumer[Task](
-      "test",
-      channel,
-      "queueName",
-      connectionInfo,
-      Monitor.noOp,
-      DeliveryResult.Retry,
-      DefaultListeners.DefaultConsumerListener,
-      RepublishStrategy.DefaultExchange,
-      TestBase.testBlocker
-    )({ delivery =>
+    val consumer = newConsumer(channel, failureAction = DeliveryResult.Retry)({ delivery =>
       assertResult(Some(messageId))(delivery.properties.messageId)
 
       Task.raiseError(new RuntimeException)
@@ -236,23 +186,12 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     val envelope = mock[Envelope]
     when(envelope.getDeliveryTag).thenReturn(deliveryTag)
 
-    val properties = mock[BasicProperties]
-    when(properties.getMessageId).thenReturn(messageId)
+    val properties = new BasicProperties.Builder().messageId(messageId).build()
 
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val consumer = new DefaultRabbitMQConsumer[Task](
-      "test",
-      channel,
-      "queueName",
-      connectionInfo,
-      Monitor.noOp,
-      DeliveryResult.Retry,
-      DefaultListeners.DefaultConsumerListener,
-      RepublishStrategy.DefaultExchange,
-      TestBase.testBlocker
-    )({ delivery =>
+    val consumer = newConsumer(channel, failureAction = DeliveryResult.Retry)({ delivery =>
       assertResult(Some(messageId))(delivery.properties.messageId)
 
       throw new RuntimeException
@@ -274,44 +213,45 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     val envelope = mock[Envelope]
     when(envelope.getDeliveryTag).thenReturn(deliveryTag)
 
-    val properties = mock[BasicProperties]
-    when(properties.getMessageId).thenReturn(messageId)
+    val properties = new BasicProperties.Builder().messageId(messageId).build()
 
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val monitor = mock[Monitor]
-    when(monitor.meter(Matchers.anyString())).thenReturn(Monitor.noOp.meter(""))
-    when(monitor.named(Matchers.eq("results"))).thenReturn(Monitor.noOp())
-    val tasksMonitor = mock[Monitor]
+    val monitor = mock[Monitor[Task]]
+    when(monitor.meter(Matchers.anyString())).thenReturn(Monitor.noOp[Task]().meter(""))
+    when(monitor.named(Matchers.eq("results"))).thenReturn(Monitor.noOp[Task]())
+    val tasksMonitor = mock[Monitor[Task]]
 
     when(monitor.named(Matchers.eq("tasks"))).thenReturn(tasksMonitor)
 
-    when(tasksMonitor.gauge(Matchers.anyString())(Matchers.any()))
-      .thenReturn(Monitor.noOp().gauge("")(() => 0).asInstanceOf[Gauge[Nothing]])
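+    // hand-rolled GaugeFactory stub: these tests only ever touch inc/dec on the settable gauge,
+    // so any other gauge access deliberately fails the test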
+    when(tasksMonitor.gauge).thenReturn(new GaugeFactory[Task] {
+      override def settableLong(n: String, replaceExisting: Boolean): SettableGauge[Task, Long] = new SettableGauge[Task, Long] {
+        override def set(value: Long): Task[Unit] = fail("Should not have been called")
+        override def update(f: Long => Long): Task[Long] = fail("Should not have been called")
+        override def inc: Task[Long] = Task.now(42) // returned value is not used anywhere
+        override def dec: Task[Long] = Task.now(42) // returned value is not used anywhere
+        override def value: Task[Long] = fail("Should not have been called")
+        override def name: String = n
+      }
+      override def settableDouble(name: String, replaceExisting: Boolean): SettableGauge[Task, Double] = fail("Should not have been called")
+      override def generic[T](name: String, replaceExisting: Boolean)(gauge: () => T): Gauge[Task, T] = fail("Should not have been called")
+    })
 
     var successLengths = Seq.newBuilder[Long] // scalastyle:ignore
     var failuresLengths = Seq.newBuilder[Long] // scalastyle:ignore
 
-    when(tasksMonitor.timerPair(Matchers.eq("processed"))).thenReturn(new TimerPair {
-      override def update(duration: Duration): Unit = successLengths += duration.toMillis
-      override def updateFailure(duration: Duration): Unit = failuresLengths += duration.toMillis
+    when(tasksMonitor.timerPair(Matchers.eq("processed"))).thenReturn(new TimerPair[Task] {
+      override def update(duration: Duration): Task[Unit] = Task.delay(successLengths += duration.toMillis)
+      override def updateFailure(duration: Duration): Task[Unit] = Task.delay(failuresLengths += duration.toMillis)
 
-      override def start(): TimeContext = fail("Should have not be called")
-      override def time[A](block: => A): A = fail("Should have not be called")
-      override def time[A](future: => Future[A])(implicit ec: ExecutionContext): Future[A] = fail("Should have not be called")
+      override def update(duration: ScalaDuration): Task[Unit] = fail("Should not have been called")
+      override def updateFailure(duration: ScalaDuration): Task[Unit] = fail("Should not have been called")
+      override def start(): Task[TimerPairContext] = fail("Should not have been called")
+      override def time[T](action: Task[T]): Task[T] = fail("Should not have been called")
+      override def time[T](action: Task[T])(successCheck: T => Boolean): Task[T] = fail("Should not have been called")
     })
 
     {
-      val consumer = new DefaultRabbitMQConsumer[Task](
-        "test",
-        channel,
-        "queueName",
-        connectionInfo,
-        monitor,
-        DeliveryResult.Retry,
-        DefaultListeners.DefaultConsumerListener,
-        RepublishStrategy.DefaultExchange,
-        TestBase.testBlocker
-      )({ delivery =>
+      val consumer = newConsumer(channel, DeliveryResult.Retry, monitor)({ delivery =>
        assertResult(Some(messageId))(delivery.properties.messageId)
 
        Task.now(DeliveryResult.Ack) // immediate
      })
@@ -330,17 +270,7 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     failuresLengths = Seq.newBuilder
 
     {
-      val consumer = new DefaultRabbitMQConsumer[Task](
-        "test",
-        channel,
-        "queueName",
-        connectionInfo,
-        monitor,
-        DeliveryResult.Retry,
-        DefaultListeners.DefaultConsumerListener,
-        RepublishStrategy.DefaultExchange,
-        TestBase.testBlocker
-      )({ delivery =>
+      val consumer = newConsumer(channel, DeliveryResult.Retry, monitor)({ delivery =>
        assertResult(Some(messageId))(delivery.properties.messageId)
        import scala.concurrent.duration._
        Task.now(DeliveryResult.Ack).delayResult(2.second)
@@ -365,44 +295,45 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     val envelope = mock[Envelope]
     when(envelope.getDeliveryTag).thenReturn(deliveryTag)
 
-    val properties = mock[BasicProperties]
-    when(properties.getMessageId).thenReturn(messageId)
+    val properties = new BasicProperties.Builder().messageId(messageId).build()
 
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val monitor = mock[Monitor]
-    when(monitor.meter(Matchers.anyString())).thenReturn(Monitor.noOp.meter(""))
-    when(monitor.named(Matchers.eq("results"))).thenReturn(Monitor.noOp())
-    val tasksMonitor = mock[Monitor]
+    val monitor = mock[Monitor[Task]]
+    when(monitor.meter(Matchers.anyString())).thenReturn(Monitor.noOp[Task]().meter(""))
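+    // same Monitor[Task] stubbing as in the previous measuring test: only timerPair("processed")
+    // records durations into the builders below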
+    when(monitor.named(Matchers.eq("results"))).thenReturn(Monitor.noOp[Task]())
+    val tasksMonitor = mock[Monitor[Task]]
 
     when(monitor.named(Matchers.eq("tasks"))).thenReturn(tasksMonitor)
 
-    when(tasksMonitor.gauge(Matchers.anyString())(Matchers.any()))
-      .thenReturn(Monitor.noOp().gauge("")(() => 0).asInstanceOf[Gauge[Nothing]])
+    when(tasksMonitor.gauge).thenReturn(new GaugeFactory[Task] {
+      override def settableLong(n: String, replaceExisting: Boolean): SettableGauge[Task, Long] = new SettableGauge[Task, Long] {
+        override def set(value: Long): Task[Unit] = fail("Should not have been called")
+        override def update(f: Long => Long): Task[Long] = fail("Should not have been called")
+        override def inc: Task[Long] = Task.now(42) // returned value is not used anywhere
+        override def dec: Task[Long] = Task.now(42) // returned value is not used anywhere
+        override def value: Task[Long] = fail("Should not have been called")
+        override def name: String = n
+      }
+      override def settableDouble(name: String, replaceExisting: Boolean): SettableGauge[Task, Double] = fail("Should not have been called")
+      override def generic[T](name: String, replaceExisting: Boolean)(gauge: () => T): Gauge[Task, T] = fail("Should not have been called")
+    })
 
     var successLengths = Seq.newBuilder[Long] // scalastyle:ignore
     var failuresLengths = Seq.newBuilder[Long] // scalastyle:ignore
 
-    when(tasksMonitor.timerPair(Matchers.eq("processed"))).thenReturn(new TimerPair {
-      override def update(duration: Duration): Unit = successLengths += duration.toMillis
-      override def updateFailure(duration: Duration): Unit = failuresLengths += duration.toMillis
+    when(tasksMonitor.timerPair(Matchers.eq("processed"))).thenReturn(new TimerPair[Task] {
+      override def update(duration: Duration): Task[Unit] = Task.delay(successLengths += duration.toMillis)
+      override def updateFailure(duration: Duration): Task[Unit] = Task.delay(failuresLengths += duration.toMillis)
 
-      override def start(): TimeContext = fail("Should have not be called")
-      override def time[A](block: => A): A = fail("Should have not be called")
-      override def time[A](future: => Future[A])(implicit ec: ExecutionContext): Future[A] = fail("Should have not be called")
+      override def update(duration: ScalaDuration): Task[Unit] = fail("Should not have been called")
+      override def updateFailure(duration: ScalaDuration): Task[Unit] = fail("Should not have been called")
+      override def start(): Task[TimerPairContext] = fail("Should not have been called")
+      override def time[T](action: Task[T]): Task[T] = fail("Should not have been called")
+      override def time[T](action: Task[T])(successCheck: T => Boolean): Task[T] = fail("Should not have been called")
     })
 
     {
-      val consumer = new DefaultRabbitMQConsumer[Task](
-        "test",
-        channel,
-        "queueName",
-        connectionInfo,
-        monitor,
-        DeliveryResult.Retry,
-        DefaultListeners.DefaultConsumerListener,
-        RepublishStrategy.DefaultExchange,
-        TestBase.testBlocker
-      )({ delivery =>
+      val consumer = newConsumer(channel, DeliveryResult.Retry, monitor)({ delivery =>
        assertResult(Some(messageId))(delivery.properties.messageId)
 
        Task.raiseError(new RuntimeException) // immediate
      })
@@ -421,20 +352,10 @@ class DefaultRabbitMQConsumerTest extends TestBase {
     failuresLengths = Seq.newBuilder
 
     {
-      val consumer = new DefaultRabbitMQConsumer[Task](
-        "test",
-        channel,
-        "queueName",
-        connectionInfo,
-        monitor,
-        DeliveryResult.Retry,
-        DefaultListeners.DefaultConsumerListener,
-        RepublishStrategy.DefaultExchange,
-        TestBase.testBlocker
-      )({ delivery =>
+      val consumer = newConsumer(channel,
DeliveryResult.Retry, monitor)({ delivery => assertResult(Some(messageId))(delivery.properties.messageId) import scala.concurrent.duration._ - Task.raiseError(new RuntimeException).delayExecution(2.second) + Task.raiseError(new RuntimeException("my exception")).delayExecution(2.second) }) consumer.handleDelivery("abcd", envelope, properties, Random.nextString(5).getBytes) @@ -448,4 +369,101 @@ class DefaultRabbitMQConsumerTest extends TestBase { } } + test("passes correlation id") { + val messageId = UUID.randomUUID().toString + val correlationId = UUID.randomUUID().toString + + val deliveryTag = Random.nextInt(1000) + + val envelope = mock[Envelope] + when(envelope.getDeliveryTag).thenReturn(deliveryTag) + + val properties = new BasicProperties.Builder().messageId(messageId).correlationId(correlationId).build() + + val channel = mock[AutorecoveringChannel] + when(channel.isOpen).thenReturn(true) + + val consumer = newConsumer(channel, failureAction = DeliveryResult.Reject)({ delivery => + assertResult(Some(messageId))(delivery.properties.messageId) + assertResult(Some(correlationId))(delivery.properties.correlationId) + + Task.now(DeliveryResult.Ack) + }) + + val body = Random.nextString(5).getBytes + consumer.handleDelivery("abcd", envelope, properties, body) + + eventually(timeout(Span(1, Seconds)), interval(Span(0.1, Seconds))) { + verify(channel, times(1)).basicAck(deliveryTag, false) + verify(channel, times(0)).basicReject(deliveryTag, false) + } + } + + test("parses correlation id from header") { + val messageId = UUID.randomUUID().toString + val correlationId = UUID.randomUUID().toString + + val deliveryTag = Random.nextInt(1000) + + val envelope = mock[Envelope] + when(envelope.getDeliveryTag).thenReturn(deliveryTag) + + val properties = new BasicProperties.Builder() + .messageId(messageId) + .headers(Map(CorrelationIdHeaderName -> correlationId.asInstanceOf[AnyRef]).asJava) + .build() + + val channel = mock[AutorecoveringChannel] + when(channel.isOpen).thenReturn(true) + + val consumer = newConsumer(channel, failureAction = DeliveryResult.Reject) { delivery => + assertResult(Some(messageId))(delivery.properties.messageId) + assertResult(Some(correlationId))(delivery.properties.correlationId) + + Task.now(DeliveryResult.Ack) + } + + val body = Random.nextString(5).getBytes + consumer.handleDelivery("abcd", envelope, properties, body) + + eventually(timeout(Span(1, Seconds)), interval(Span(0.1, Seconds))) { + verify(channel, times(1)).basicAck(deliveryTag, false) + verify(channel, times(0)).basicReject(deliveryTag, false) + } + } + + private def newConsumer(channel: ServerChannel, failureAction: DeliveryResult = Republish(), monitor: Monitor[Task] = Monitor.noOp())( + userAction: DeliveryReadAction[Task, Bytes]): DefaultRabbitMQConsumer[Task, Bytes] = { + val base = new ConsumerBase[Task, Bytes]( + "test", + "queueName", + TestBase.testBlocker, + ImplicitContextLogger.createLogger, + monitor + ) + + val channelOps = new ConsumerChannelOps[Task, Bytes]( + "test", + "queueName", + channel, + TestBase.testBlocker, + RepublishStrategy.DefaultExchange[Task](), + PMH, + connectionInfo, + ImplicitContextLogger.createLogger, + monitor + ) + + new DefaultRabbitMQConsumer[Task, Bytes]( + base, + channelOps, + 10.seconds, + DeliveryResult.Republish(), + Level.ERROR, + failureAction, + DefaultListeners.defaultConsumerListener, + )(userAction) + } + + object PMH extends LoggingPoisonedMessageHandler[Task, Bytes](3) } diff --git 
a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducerTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducerTest.scala index 4dce95d9..7f9c2cc3 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducerTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQProducerTest.scala @@ -1,12 +1,12 @@ package com.avast.clients.rabbitmq import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.MessageProperties -import com.avast.metrics.scalaapi.Monitor +import com.avast.clients.rabbitmq.api.{CorrelationIdStrategy, MessageProperties} +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor import com.rabbitmq.client.AMQP import com.rabbitmq.client.impl.recovery.AutorecoveringChannel import monix.eval.Task -import monix.execution.Scheduler import monix.execution.Scheduler.Implicits.global import org.mockito.Mockito._ import org.mockito.{ArgumentCaptor, Matchers} @@ -15,6 +15,9 @@ import scala.util.Random class DefaultRabbitMQProducerTest extends TestBase { + // default, is overridden in some tests + implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.RandomNew + test("basic") { val exchangeName = Random.nextString(10) val routingKey = Random.nextString(10) @@ -25,10 +28,11 @@ class DefaultRabbitMQProducerTest extends TestBase { name = "test", exchangeName = exchangeName, channel = channel, - monitor = Monitor.noOp, + monitor = Monitor.noOp(), defaultProperties = MessageProperties.empty, reportUnroutable = false, - blocker = TestBase.testBlocker + blocker = TestBase.testBlocker, + logger = ImplicitContextLogger.createLogger ) val properties = new AMQP.BasicProperties.Builder() @@ -46,8 +50,123 @@ class DefaultRabbitMQProducerTest extends TestBase { captor.capture(), Matchers.eq(body.toByteArray)) - assertResult(properties.toString)(captor.getValue.toString) // AMQP.BasicProperties doesn't have `equals` method :-/ + val caughtProperties = captor.getValue + + assert(caughtProperties.getCorrelationId != null) + + val caughtWithoutIds = caughtProperties.builder().messageId(null).correlationId(null).build() + + assertResult(properties.toString)(caughtWithoutIds.toString) // AMQP.BasicProperties doesn't have `equals` method :-/ } - // todo add more tests + test("correlation id is taken from properties") { + val exchangeName = Random.nextString(10) + val routingKey = Random.nextString(10) + + val channel = mock[AutorecoveringChannel] + + val producer = new DefaultRabbitMQProducer[Task, Bytes]( + name = "test", + exchangeName = exchangeName, + channel = channel, + monitor = Monitor.noOp(), + defaultProperties = MessageProperties.empty, + reportUnroutable = false, + blocker = TestBase.testBlocker, + logger = ImplicitContextLogger.createLogger + ) + + val cid = Random.nextString(10) + val cid2 = Random.nextString(10) + + val body = Bytes.copyFromUtf8(Random.nextString(10)) + + val mp = Some( + MessageProperties(correlationId = Some(cid), headers = Map(CorrelationIdStrategy.CorrelationIdKeyName -> cid2.asInstanceOf[AnyRef])) + ) + + implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.FromPropertiesOrRandomNew(mp) + + producer.send(routingKey, body, mp).await + + val captor = ArgumentCaptor.forClass(classOf[AMQP.BasicProperties]) + + verify(channel, times(1)).basicPublish(Matchers.eq(exchangeName), + Matchers.eq(routingKey), + captor.capture(), + Matchers.eq(body.toByteArray)) + + // check that the one from 
properties was used + assertResult(cid)(captor.getValue.getCorrelationId) + } + + test("correlation id is taken from header if not in properties") { + val exchangeName = Random.nextString(10) + val routingKey = Random.nextString(10) + + val channel = mock[AutorecoveringChannel] + + val producer = new DefaultRabbitMQProducer[Task, Bytes]( + name = "test", + exchangeName = exchangeName, + channel = channel, + monitor = Monitor.noOp(), + defaultProperties = MessageProperties.empty, + reportUnroutable = false, + blocker = TestBase.testBlocker, + logger = ImplicitContextLogger.createLogger + ) + + val cid = Random.nextString(10) + + val body = Bytes.copyFromUtf8(Random.nextString(10)) + + val mp = Some(MessageProperties(headers = Map(CorrelationIdStrategy.CorrelationIdKeyName -> cid.asInstanceOf[AnyRef]))) + + implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.FromPropertiesOrRandomNew(mp) + + producer.send(routingKey, body, mp).await + + val captor = ArgumentCaptor.forClass(classOf[AMQP.BasicProperties]) + + verify(channel, times(1)).basicPublish(Matchers.eq(exchangeName), + Matchers.eq(routingKey), + captor.capture(), + Matchers.eq(body.toByteArray)) + + // check that the one from headers was used + assertResult(cid)(captor.getValue.getCorrelationId) + } + + test("correlation id is generated if not in header nor properties") { + val exchangeName = Random.nextString(10) + val routingKey = Random.nextString(10) + + val channel = mock[AutorecoveringChannel] + + val producer = new DefaultRabbitMQProducer[Task, Bytes]( + name = "test", + exchangeName = exchangeName, + channel = channel, + monitor = Monitor.noOp(), + defaultProperties = MessageProperties.empty, + reportUnroutable = false, + blocker = TestBase.testBlocker, + logger = ImplicitContextLogger.createLogger + ) + + val body = Bytes.copyFromUtf8(Random.nextString(10)) + + producer.send(routingKey, body).await + + val captor = ArgumentCaptor.forClass(classOf[AMQP.BasicProperties]) + + verify(channel, times(1)).basicPublish(Matchers.eq(exchangeName), + Matchers.eq(routingKey), + captor.capture(), + Matchers.eq(body.toByteArray)) + + // check that some CID was generated + assert(captor.getValue.getCorrelationId != null) + } } diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumerTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumerTest.scala index 4c2d0e6f..5ecac720 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumerTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/DefaultRabbitMQPullConsumerTest.scala @@ -1,10 +1,10 @@ package com.avast.clients.rabbitmq -import java.util.UUID - import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.DefaultRabbitMQConsumer.CorrelationIdHeaderName import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger +import com.avast.metrics.scalaeffectapi.Monitor import com.rabbitmq.client.AMQP.BasicProperties import com.rabbitmq.client.impl.recovery.AutorecoveringChannel import com.rabbitmq.client.{Envelope, GetResponse} @@ -14,8 +14,9 @@ import org.mockito.Mockito._ import org.mockito.{ArgumentCaptor, Matchers} import org.scalatest.time.{Seconds, Span} -import scala.jdk.CollectionConverters._ +import java.util.UUID import scala.collection.immutable +import scala.jdk.CollectionConverters._ import scala.util.Random class DefaultRabbitMQPullConsumerTest extends TestBase { @@ -30,8 +31,7 @@ 
class DefaultRabbitMQPullConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) @@ -42,16 +42,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { new GetResponse(envelope, properties, body, 1) ) - val consumer = new DefaultRabbitMQPullConsumer[Task, Bytes]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Reject, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - ) + val consumer = newConsumer[Bytes](channel) val PullResult.Ok(dwh) = consumer.pull().await @@ -75,8 +66,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) @@ -87,16 +77,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { new GetResponse(envelope, properties, body, 1) ) - val consumer = new DefaultRabbitMQPullConsumer[Task, Bytes]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Reject, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - ) + val consumer = newConsumer[Bytes](channel) val PullResult.Ok(dwh) = consumer.pull().await @@ -120,8 +101,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) @@ -132,16 +112,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { new GetResponse(envelope, properties, body, 1) ) - val consumer = new DefaultRabbitMQPullConsumer[Task, Bytes]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Ack, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - ) + val consumer = newConsumer[Bytes](channel) val PullResult.Ok(dwh) = consumer.pull().await @@ -177,16 +148,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { new GetResponse(envelope, properties, body, 1) ) - val consumer = new DefaultRabbitMQPullConsumer[Task, Bytes]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Reject, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - ) + val consumer = newConsumer[Bytes](channel) val PullResult.Ok(dwh) = consumer.pull().await @@ -204,7 +166,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { } } - test("should NACK because of unexpected failure") { + test("should propagate conversion failure") { val messageId = UUID.randomUUID().toString val deliveryTag = Random.nextInt(1000) @@ -212,8 +174,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new 
BasicProperties.Builder().messageId(messageId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) @@ -230,28 +191,20 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { throw new IllegalArgumentException } - val consumer = new DefaultRabbitMQPullConsumer[Task, Abc]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Retry, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker - ) - - assertThrows[IllegalArgumentException] { - consumer.pull().await - } + val consumer = newConsumer[Abc](channel) - eventually(timeout(Span(1, Seconds)), interval(Span(0.1, Seconds))) { - verify(channel, times(0)).basicAck(deliveryTag, false) - verify(channel, times(1)).basicReject(deliveryTag, true) + consumer.pull().await match { + case PullResult.Ok(dwh) => + dwh.delivery match { + case _: Delivery.Ok[Abc] => fail("the conversion should have failed") + case _: Delivery.MalformedContent => // ok + } + case PullResult.EmptyQueue => fail("empty response") } } - test("should NACK because of conversion failure") { + test("passes correlation id") { + val correlationId = UUID.randomUUID().toString val messageId = UUID.randomUUID().toString val deliveryTag = Random.nextInt(1000) @@ -259,8 +212,7 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { val envelope = mock[Envelope] when(envelope.getDeliveryTag).thenReturn(deliveryTag) - val properties = mock[BasicProperties] - when(properties.getMessageId).thenReturn(messageId) + val properties = new BasicProperties.Builder().messageId(messageId).correlationId(correlationId).build() val channel = mock[AutorecoveringChannel] when(channel.isOpen).thenReturn(true) @@ -271,33 +223,68 @@ class DefaultRabbitMQPullConsumerTest extends TestBase { new GetResponse(envelope, properties, body, 1) ) - case class Abc(i: Int) + val consumer = newConsumer[Bytes](channel) - implicit val c: DeliveryConverter[Abc] = (_: Bytes) => { - Left(ConversionException(messageId)) - } + val PullResult.Ok(dwh) = consumer.pull().await - val consumer = new DefaultRabbitMQPullConsumer[Task, Abc]( - "test", - channel, - "queueName", - connectionInfo, - DeliveryResult.Ack, - Monitor.noOp, - RepublishStrategy.DefaultExchange, - TestBase.testBlocker + assertResult(Some(messageId))(dwh.delivery.properties.messageId) + assertResult(Some(correlationId))(dwh.delivery.properties.correlationId) + } + + test("parses correlation id from header") { + val correlationId = UUID.randomUUID().toString + val messageId = UUID.randomUUID().toString + + val deliveryTag = Random.nextInt(1000) + + val envelope = mock[Envelope] + when(envelope.getDeliveryTag).thenReturn(deliveryTag) + + val properties = new BasicProperties.Builder() + .messageId(messageId) + .headers(Map(CorrelationIdHeaderName -> correlationId.asInstanceOf[AnyRef]).asJava) + .build() + + val channel = mock[AutorecoveringChannel] + when(channel.isOpen).thenReturn(true) + + val body = Random.nextString(5).getBytes + + when(channel.basicGet(Matchers.eq("queueName"), Matchers.eq(false))).thenReturn( + new GetResponse(envelope, properties, body, 1) ) + val consumer = newConsumer[Bytes](channel) + val PullResult.Ok(dwh) = consumer.pull().await - val Delivery.MalformedContent(_, _, _, ce) = dwh.delivery - assertResult(messageId)(ce.getMessage) + assertResult(Some(messageId))(dwh.delivery.properties.messageId) + assertResult(Some(correlationId))(dwh.delivery.properties.correlationId) + } - dwh.handle(DeliveryResult.Retry).await + private def newConsumer[A: 
DeliveryConverter](channel: ServerChannel): DefaultRabbitMQPullConsumer[Task, A] = { + val base = new ConsumerBase[Task, A]( + "test", + "queueName", + TestBase.testBlocker, + ImplicitContextLogger.createLogger, + Monitor.noOp() + ) - eventually(timeout(Span(1, Seconds)), interval(Span(0.1, Seconds))) { - verify(channel, times(0)).basicAck(deliveryTag, false) - verify(channel, times(1)).basicReject(deliveryTag, true) - } + val channelOps = new ConsumerChannelOps[Task, A]( + "test", + "queueName", + channel, + TestBase.testBlocker, + RepublishStrategy.DefaultExchange[Task](), + new PMH, + connectionInfo, + ImplicitContextLogger.createLogger, + Monitor.noOp() + ) + + new DefaultRabbitMQPullConsumer[Task, A](base, channelOps) } + + class PMH[A] extends LoggingPoisonedMessageHandler[Task, A](3) } diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/MultiFormatConsumerTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/MultiFormatConsumerTest.scala index 64e37865..64971ba1 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/MultiFormatConsumerTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/MultiFormatConsumerTest.scala @@ -1,21 +1,15 @@ package com.avast.clients.rabbitmq import com.avast.bytes.Bytes -import com.avast.bytes.gpb.ByteStringBytes -import com.avast.cactus.bytes._ -import com.avast.clients.rabbitmq.api.{ConversionException, Delivery, DeliveryResult, MessageProperties} +import com.avast.clients.rabbitmq.api._ import com.avast.clients.rabbitmq.extras.format._ -import com.avast.clients.rabbitmq.test.ExampleEvents.{FileSource => FileSourceGpb, NewFileSourceAdded => NewFileSourceAddedGpb} -import com.google.protobuf.ByteString import io.circe.Decoder import io.circe.generic.extras.Configuration import io.circe.generic.extras.auto._ -import org.scalatest.concurrent.ScalaFutures +import monix.eval.Task +import monix.execution.Scheduler.Implicits.global -import scala.jdk.CollectionConverters._ -import scala.concurrent.Future - -class MultiFormatConsumerTest extends TestBase with ScalaFutures { +class MultiFormatConsumerTest extends TestBase { val StringDeliveryConverter: CheckedDeliveryConverter[String] = new CheckedDeliveryConverter[String] { override def canConvert(d: Delivery[Bytes]): Boolean = d.properties.contentType.contains("text/plain") @@ -31,10 +25,10 @@ class MultiFormatConsumerTest extends TestBase with ScalaFutures { case class NewFileSourceAdded(fileSources: Seq[FileSource]) test("basic") { - val consumer = MultiFormatConsumer.forType[Future, String](StringDeliveryConverter) { + val consumer = MultiFormatConsumer.forType[Task, String](StringDeliveryConverter) { case d: Delivery.Ok[String] => assertResult("abc321")(d.body) - Future.successful(DeliveryResult.Ack) + Task.now(DeliveryResult.Ack) case _ => fail() } @@ -45,17 +39,17 @@ class MultiFormatConsumerTest extends TestBase with ScalaFutures { routingKey = "" ) - val result = consumer.apply(delivery).futureValue + val result = consumer.apply(delivery).await assertResult(DeliveryResult.Ack)(result) } test("non-supported content-type") { - val consumer = MultiFormatConsumer.forType[Future, String](StringDeliveryConverter) { + val consumer = MultiFormatConsumer.forType[Task, String](StringDeliveryConverter) { case _: Delivery.Ok[String] => - Future.successful(DeliveryResult.Ack) + Task.now(DeliveryResult.Ack) case _ => - Future.successful(DeliveryResult.Reject) + Task.now(DeliveryResult.Reject) } val delivery = Delivery( @@ -64,13 +58,13 @@ class MultiFormatConsumerTest extends TestBase 
with ScalaFutures { routingKey = "" ) - val result = consumer.apply(delivery).futureValue + val result = consumer.apply(delivery).await assertResult(DeliveryResult.Reject)(result) } test("json") { - val consumer = MultiFormatConsumer.forType[Future, NewFileSourceAdded](JsonDeliveryConverter.derive()) { + val consumer = MultiFormatConsumer.forType[Task, NewFileSourceAdded](JsonDeliveryConverter.derive()) { case d: Delivery.Ok[NewFileSourceAdded] => assertResult( NewFileSourceAdded( @@ -79,9 +73,9 @@ class MultiFormatConsumerTest extends TestBase with ScalaFutures { FileSource(Bytes.copyFromUtf8("def"), "theSource") )))(d.body) - Future.successful(DeliveryResult.Ack) + Task.now(DeliveryResult.Ack) - case _ => Future.successful(DeliveryResult.Reject) + case _ => Task.now(DeliveryResult.Reject) } val delivery = Delivery( @@ -95,43 +89,7 @@ class MultiFormatConsumerTest extends TestBase with ScalaFutures { routingKey = "" ) - val result = consumer.apply(delivery).futureValue - - assertResult(DeliveryResult.Ack)(result) - } - - test("gpb") { - val consumer = MultiFormatConsumer.forType[Future, NewFileSourceAdded](JsonDeliveryConverter.derive(), - GpbDeliveryConverter[NewFileSourceAddedGpb].derive()) { - case d: Delivery.Ok[NewFileSourceAdded] => - assertResult( - NewFileSourceAdded( - Seq( - FileSource(Bytes.copyFromUtf8("abc"), "theSource"), - FileSource(Bytes.copyFromUtf8("def"), "theSource") - )))(d.body) - - Future.successful(DeliveryResult.Ack) - - case _ => fail() - } - - val delivery = Delivery( - body = ByteStringBytes.wrap { - NewFileSourceAddedGpb - .newBuilder() - .addAllFileSources(Seq( - FileSourceGpb.newBuilder().setFileId(ByteString.copyFromUtf8("abc")).setSource("theSource").build(), - FileSourceGpb.newBuilder().setFileId(ByteString.copyFromUtf8("def")).setSource("theSource").build() - ).asJava) - .build() - .toByteString - }: Bytes, - properties = MessageProperties(contentType = GpbDeliveryConverter.ContentTypes.headOption.map(_.toUpperCase)), - routingKey = "" - ) - - val result = consumer.apply(delivery).futureValue + val result = consumer.apply(delivery).await assertResult(DeliveryResult.Ack)(result) } diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerLiveTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerLiveTest.scala new file mode 100644 index 00000000..e03c701f --- /dev/null +++ b/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerLiveTest.scala @@ -0,0 +1,339 @@ +package com.avast.clients.rabbitmq + +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.api._ +import com.avast.clients.rabbitmq.pureconfig._ +import com.avast.metrics.scalaeffectapi.Monitor +import com.typesafe.config._ +import monix.eval.Task +import monix.execution.Scheduler +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.time._ + +import java.util.concurrent._ +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.ExecutionContext +import scala.concurrent.duration._ +import scala.jdk.CollectionConverters._ +import scala.util.Random + +class PoisonedMessageHandlerLiveTest extends TestBase with ScalaFutures { + + def randomString(length: Int): String = { + Random.alphanumeric.take(length).mkString("") + } + + private implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.RandomNew + + private implicit val p: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds)) + + private lazy val testHelper = new TestHelper(System.getProperty("rabbit.host", 
System.getenv("rabbit.host")), + System.getProperty("rabbit.tcp.15672", System.getenv("rabbit.tcp.15672")).toInt) + + //noinspection ScalaStyle + private def createConfig() = new { + val queueName1: String = randomString(4) + "_QU1" + val queueName2: String = randomString(4) + "_QU2" + val exchange1: String = randomString(4) + "_EX1" + val exchange2: String = randomString(4) + "_EX2" + val exchange3: String = randomString(4) + "_EX3" + val exchange5: String = randomString(4) + "_EX5" + + val initialRoutingKey: String = "test_" + randomString(4) + val deadRoutingKey: String = "dead_" + randomString(4) + + testHelper.queue.delete(queueName1) + testHelper.queue.delete(queueName2) + + private val original = ConfigFactory.load().getConfig("myConfig") + + val bindConfigs: Array[Config] = + original.getObjectList("consumers.testingWithPoisonedMessageHandler.bindings").asScala.map(_.toConfig).toArray + bindConfigs(0) = bindConfigs(0) + .withValue("exchange.name", ConfigValueFactory.fromAnyRef(exchange1)) + .withValue("routingKeys", ConfigValueFactory.fromIterable(Seq(initialRoutingKey).asJava)) + + // @formatter:off + val config: Config = original + .withValue("republishStrategy.exchangeName", ConfigValueFactory.fromAnyRef(exchange5)) + .withValue("consumers.testingWithPoisonedMessageHandler.queueName", ConfigValueFactory.fromAnyRef(queueName1)) + .withValue("consumers.testingWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.exchange", ConfigValueFactory.fromAnyRef(exchange3)) + .withValue("consumers.testingWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.routingKey", ConfigValueFactory.fromAnyRef(deadRoutingKey)) + .withValue("consumers.testingWithPoisonedMessageHandler.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) + .withValue("consumers.testingStreamingWithPoisonedMessageHandler.queueName", ConfigValueFactory.fromAnyRef(queueName1)) + .withValue("consumers.testingStreamingWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.exchange", ConfigValueFactory.fromAnyRef(exchange3)) + .withValue("consumers.testingStreamingWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.routingKey", ConfigValueFactory.fromAnyRef(deadRoutingKey)) + .withValue("consumers.testingStreamingWithPoisonedMessageHandler.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) + .withValue("consumers.testingPullWithPoisonedMessageHandler.queueName", ConfigValueFactory.fromAnyRef(queueName1)) + .withValue("consumers.testingPullWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.exchange", ConfigValueFactory.fromAnyRef(exchange3)) + .withValue("consumers.testingPullWithPoisonedMessageHandler.poisonedMessageHandling.deadQueueProducer.routingKey", ConfigValueFactory.fromAnyRef(deadRoutingKey)) + .withValue("consumers.testingPullWithPoisonedMessageHandler.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) + .withValue("producers.testing.exchange", ConfigValueFactory.fromAnyRef(exchange1)) + .withValue("declarations.declareQueue.name", ConfigValueFactory.fromAnyRef(queueName2)) + .withValue("declarations.bindQueue.exchangeName", ConfigValueFactory.fromAnyRef(exchange3)) + .withValue("declarations.bindQueue.queueName", ConfigValueFactory.fromAnyRef(queueName2)) + .withValue("declarations.bindQueue.routingKeys", ConfigValueFactory.fromIterable(Seq(initialRoutingKey, deadRoutingKey).asJava)) + // @formatter:on + + val ex: ExecutorService = 
ExecutionContext.fromExecutorService(Executors.newCachedThreadPool())
+
+    implicit val sched: Scheduler = Scheduler(
+      Executors.newScheduledThreadPool(4),
+      ExecutionContext.fromExecutor(new ForkJoinPool())
+    )
+  }
+
+  /*
+  Dead queue mapping:
+
+  -- > EXCHANGE3 --(test)--> QUEUE2
+   */
+
+  test("PoisonedMessageHandler") {
+    val c = createConfig()
+    import c._
+
+    val messagesCount = Random.nextInt(5000) + 5000 // 5-10k
+
+    println(s"Sending $messagesCount messages!")
+
+    RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+      val processed = new AtomicInteger(0)
+
+      rabbitConnection
+        .newConsumer("testingWithPoisonedMessageHandler", Monitor.noOp()) { _: Delivery[Bytes] =>
+          Task {
+            processed.incrementAndGet()
+            DeliveryResult.Republish()
+          }
+        }
+        .withResource { _ =>
+          rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+            // this needs to be done _after_ the producer is created (it declares the exchange) but _before_ it starts sending messages (so they are not lost)
+            rabbitConnection.declareQueue("declareQueue").await
+            rabbitConnection.bindQueue("bindQueue").await
+
+            for (n <- 1 to messagesCount) {
+              sender.send(initialRoutingKey, Bytes.copyFromUtf8(n.toString), Some(MessageProperties(messageId = Some(s"msg_${n}_")))).await
+            }
+
+            eventually(timeout(Span(90, Seconds)), interval(Span(1, Seconds))) {
+              println(s"PROCESSED COUNT: ${processed.get()}")
+              assertResult(messagesCount * 2)(processed.get())
+              assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) // original dest. queue
+              assertResult(messagesCount)(testHelper.queue.getMessagesCount(queueName2)) // dead queue
+            }
+          }
+        }
+    }
+  }
+
+  test("PoisonedMessageHandler with timeouting messages") {
+    val c = createConfig()
+    import c._
+
+    val messagesCount = (Random.nextInt(1000) + 1000) * 2 // 2-4k, even
+
+    println(s"Sending $messagesCount messages!")
+
+    RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+      val processed = new AtomicInteger(0)
+
+      rabbitConnection
+        .newConsumer[Bytes]("testingWithPoisonedMessageHandler", Monitor.noOp()) {
+          case Delivery.Ok(body, _, _) =>
+            val n = body.toStringUtf8.toInt
+
+            /* This basically means every second message should be timed out. That will cause it to be republished.
+             * Thanks to maxAttempts = 2, this will be done twice in a row, so the resulting numbers are:
+             *
+             * - processed = messagesCount * 1.5 (half of the messages are "processed" twice)
+             * - poisoned = messagesCount / 2 (half of the messages are "thrown away")
+             * - rest in queue = 0
+             *
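+             * Worked example (added for clarity; the concrete count is random in the test): with
+             * messagesCount = 2000, the 1000 odd-numbered messages are acked on their first attempt, while
+             * each of the 1000 even-numbered ones times out twice (original delivery plus one republish)
+             * before being moved to the dead queue, so processed = 1000 + 2 * 1000 = 3000 = 1.5 * messagesCount
+             * and the dead queue ends up with 1000 = messagesCount / 2 messages.
+             *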
+             */
+
+            Task {
+              processed.incrementAndGet()
+            } >>
+              sleepIfEven(n, 800.millis) >> // timeout is 500 ms, this needs to be much longer to stay deterministic
+              Task.now(DeliveryResult.Ack)
+
+          case _ => fail()
+        }
+        .withResource { _ =>
+          rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+            // this needs to be done _after_ the producer is created (it declares the exchange) but _before_ it starts sending messages (so they are not lost)
+            rabbitConnection.declareQueue("declareQueue").await
+            rabbitConnection.bindQueue("bindQueue").await
+
+            for (n <- 1 to messagesCount) {
+              sender.send(initialRoutingKey, Bytes.copyFromUtf8(n.toString), Some(MessageProperties(messageId = Some(s"msg_${n}_")))).await
+            }
+
+            eventually(timeout(Span(90, Seconds)), interval(Span(1, Seconds))) {
+              println(s"PROCESSED COUNT: ${processed.get()}")
+              assertResult(1.5 * messagesCount)(processed.get())
+              assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) // original dest. queue
+              assertResult(messagesCount / 2)(testHelper.queue.getMessagesCount(queueName2)) // dead queue
+            }
+          }
+        }
+    }
+  }
+
+  test("PoisonedMessageHandler pull") {
+    val c = createConfig()
+    import c._
+
+    val messagesCount = Random.nextInt(2000) + 2000 // only 2-4k, this consumer is just slow
+
+    println(s"Sending $messagesCount messages!")
+
+    RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+      val processed = new AtomicInteger(0)
+
+      rabbitConnection.newPullConsumer[Bytes]("testingPullWithPoisonedMessageHandler", Monitor.noOp()).withResource { cons =>
+        rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+          // this needs to be done _after_ the producer is created (it declares the exchange) but _before_ it starts sending messages (so they are not lost)
+          rabbitConnection.declareQueue("declareQueue").await
+          rabbitConnection.bindQueue("bindQueue").await
+
+          for (n <- 1 to messagesCount) {
+            sender.send(initialRoutingKey, Bytes.copyFromUtf8(n.toString), Some(MessageProperties(messageId = Some(s"msg_${n}_")))).await
+          }
+
+          // run async:
+          ex.execute(() => {
+            while (true) {
+              val PullResult.Ok(dwh) = cons.pull().await
+              processed.incrementAndGet()
+              dwh.handle(DeliveryResult.Republish()).await
+            }
+          })
+
+          eventually(timeout(Span(90, Seconds)), interval(Span(1, Seconds))) {
+            println(s"PROCESSED COUNT: ${processed.get()}")
+            assertResult(2 * messagesCount)(processed.get())
+            assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) // original dest. queue
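+            // every message is pulled twice (the configured limit is 2 attempts and the handler always
+            // republishes), so all of them eventually end up in the dead queue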
+            assertResult(messagesCount)(testHelper.queue.getMessagesCount(queueName2)) // dead queue
+          }
+        }
+      }
+    }
+  }
+
+  test("PoisonedMessageHandler streaming") {
+    val c = createConfig()
+    import c._
+
+    val messagesCount = Random.nextInt(5000) + 5000 // 5-10k
+
+    println(s"Sending $messagesCount messages!")
+
+    RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+      val processed = new AtomicInteger(0)
+
+      rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithPoisonedMessageHandler", Monitor.noOp()).withResource { cons =>
+        cons.deliveryStream
+          .evalMap {
+            _.handleWith { _ =>
+              Task {
+                processed.incrementAndGet()
+                DeliveryResult.Republish()
+              }
+            }
+          }
+          .compile
+          .drain
+          .runToFuture
+
+        rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+          // this needs to be done _after_ the producer is created (it declares the exchange) but _before_ it starts sending messages (so they are not lost)
+          rabbitConnection.declareQueue("declareQueue").await
+          rabbitConnection.bindQueue("bindQueue").await
+
+          for (n <- 1 to messagesCount) {
+            sender.send(initialRoutingKey, Bytes.copyFromUtf8(n.toString), Some(MessageProperties(messageId = Some(s"msg_${n}_")))).await
+          }
+
+          eventually(timeout(Span(30, Seconds)), interval(Span(1, Seconds))) {
+            println(s"PROCESSED COUNT: ${processed.get()}")
+            // we can't assert the `processed` here - some deliveries may have been cancelled before they were even executed
+            assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) // original dest. queue
+            assertResult(messagesCount)(testHelper.queue.getMessagesCount(queueName2)) // dead queue
+          }
+        }
+      }
+    }
+  }
+
+  test("PoisonedMessageHandler streaming with timeouting messages") {
+    val c = createConfig()
+    import c._
+
+    val monitor = new TestMonitor[Task]
+
+    val messagesCount = (Random.nextInt(2000) + 2000) * 2 // 4-8k, even
+
+    println(s"Sending $messagesCount messages!")
+
+    RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+      val processed = new AtomicInteger(0)
+
+      rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithPoisonedMessageHandler", monitor).withResource { cons =>
+        cons.deliveryStream
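+          // up to 200 deliveries are handled concurrently; completion order is irrelevant for this test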
+          .parEvalMapUnordered(200) {
+            _.handleWith {
+              case Delivery.Ok(body, _, _) =>
+                val n = body.toStringUtf8.toInt
+
+                /* This basically means every second message should be timed out. That will cause it to be republished.
+                 * Thanks to maxAttempts = 2, this will be done twice in a row, so the resulting numbers are:
+                 *
+                 * - poisoned = messagesCount / 2 (half of the messages are "thrown away")
+                 * - rest in queue = 0
+                 *
+                 */
+
+                Task {
+                  processed.incrementAndGet()
+                } >>
+                  sleepIfEven(n, 800.millis) >> // timeout is 500 ms, this needs to be much longer to stay deterministic
+                  Task.now(DeliveryResult.Ack)
+
+              case _ => fail()
+            }
+          }
+          .compile
+          .drain
+          .runToFuture
+
+        rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+          // this needs to be done _after_ the producer is created (it declares the exchange) but _before_ it starts sending messages (so they are not lost)
+          rabbitConnection.declareQueue("declareQueue").await
+          rabbitConnection.bindQueue("bindQueue").await
+
+          for (n <- 1 to messagesCount) {
+            sender.send(initialRoutingKey, Bytes.copyFromUtf8(n.toString), Some(MessageProperties(messageId = Some(s"msg_${n}_")))).await
+          }
+
+          eventually(timeout(Span(90, Seconds)), interval(Span(1, Seconds))) {
+            println(s"PROCESSED COUNT: ${processed.get()}")
+            // we can't assert the `processed` here - some deliveries may have been cancelled before they were even executed
+            assertResult(0)(testHelper.queue.getMessagesCount(queueName1))
+            assertResult(messagesCount / 2)(testHelper.queue.getMessagesCount(queueName2)) // dead queue
+          }
+        }
+      }
+    }
+  }
+
+  private def sleepIfEven(n: Int, length: FiniteDuration): Task[Unit] = {
+    if (n % 2 == 0) {
+      Task.sleep(length)
+    } else {
+      Task.unit
+    }
+  }
+}
diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerTest.scala
new file mode 100644
index 00000000..d9c324c0
--- /dev/null
+++ b/core/src/test/scala/com/avast/clients/rabbitmq/PoisonedMessageHandlerTest.scala
@@ -0,0 +1,154 @@
+package com.avast.clients.rabbitmq
+
+import com.avast.bytes.Bytes
+import com.avast.clients.rabbitmq.PoisonedMessageHandler._
+import com.avast.clients.rabbitmq.api.DeliveryResult.Republish
+import com.avast.clients.rabbitmq.api._
+import com.avast.clients.rabbitmq.logging.ImplicitContextLogger
+import monix.eval.Task
+import monix.execution.Scheduler.Implicits.global
+
+import java.util.concurrent.atomic.AtomicInteger
+
+class PoisonedMessageHandlerTest extends TestBase {
+
+  implicit val dctx: DeliveryContext = TestDeliveryContext.create()
+
+  private val ilogger = ImplicitContextLogger.createLogger[Task, PoisonedMessageHandlerTest]
+
+  test("PoisonedMessageHandler.handleResult ignores non-poisoned") {
+    def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = {
+      Task.now(Republish(countAsPoisoned = false))
+    }
+
+    val movedCount = new AtomicInteger(0)
+
+    PoisonedMessageHandler
+      .handleResult[Task, Bytes](Delivery.Ok(Bytes.empty(), MessageProperties(), ""), MessageId("msg-id"), 1, ilogger, (_, _) => {
+        Task.delay { movedCount.incrementAndGet() }
+      })(Republish(countAsPoisoned = false))
+      .await
+
+    assertResult(0)(movedCount.get())
+
+    movedCount.set(0)
+
+    PoisonedMessageHandler
+      .handleResult[Task, Bytes](Delivery.Ok(Bytes.empty(), MessageProperties(), ""), MessageId("msg-id"), 1, ilogger, (_, _) => {
+        Task.delay { movedCount.incrementAndGet() }
+      })(Republish())
+      .await
+
+    assertResult(1)(movedCount.get())
+  }
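+  // A consumer-side sketch (illustration only, not code from this patch; `process` and `TransientError`
+  // are hypothetical): a handler can keep transient failures from counting toward the poisoning limit by
+  // returning Republish(countAsPoisoned = false), which, as the test above shows, never triggers the
+  // move-to-dead-queue action:
+  //
+  //   def handle(d: Delivery[Bytes]): Task[DeliveryResult] =
+  //     process(d)
+  //       .map(_ => DeliveryResult.Ack)
+  //       .onErrorRecover { case _: TransientError => DeliveryResult.Republish(countAsPoisoned = false) }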
+
+  test("LoggingPoisonedMessageHandler basic") {
+    def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = {
+      Task.now(Republish())
+    }
+
+    val handler = new LoggingPoisonedMessageHandler[Task, Bytes](5)
+
+    val properties = (1 to 4).foldLeft(MessageProperties.empty) {
+      case (p, _) =>
+        run(handler, readAction, p) match {
+          case Republish(_, h) => MessageProperties(headers = h)
+          case _ => MessageProperties.empty
+        }
+    }
+
+    // check that it keeps incrementing the republish count header
+    assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 4.asInstanceOf[AnyRef])))(properties)
+
+    // check it will Reject the message on the 5th attempt
+    assertResult(DeliveryResult.Reject)(run(handler, readAction, properties))
+  }
+
+  test("NoOpPoisonedMessageHandler basic") {
+    def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = {
+      Task.now(Republish())
+    }
+
+    val handler = new NoOpPoisonedMessageHandler[Task, Bytes]
+
+    val properties = (1 to 4).foldLeft(MessageProperties.empty) {
+      case (p, _) =>
+        run(handler, readAction, p) match {
+          case Republish(_, h) => MessageProperties(headers = h)
+          case _ => MessageProperties.empty
+        }
+    }
+
+    // check that it does not touch the headers at all
+    assertResult(MessageProperties(headers = Map.empty))(properties)
+  }
+
+  test("DeadQueuePoisonedMessageHandler basic") {
+    def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = {
+      Task.now(Republish())
+    }
+
+    val movedCount = new AtomicInteger(0)
+
+    val handler = new DeadQueuePoisonedMessageHandler[Task, Bytes](5)({ (_, _, _) =>
+      Task.delay(movedCount.incrementAndGet())
+    })
+
+    val properties = (1 to 4).foldLeft(MessageProperties.empty) {
+      case (p, _) =>
+        run(handler, readAction, p) match {
+          case Republish(_, h) => MessageProperties(headers = h)
+          case _ => MessageProperties.empty
+        }
+    }
+
+    // check that it keeps incrementing the republish count header
+    assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 4.asInstanceOf[AnyRef])))(properties)
+
+    // check it will Reject the message on the 5th attempt
+    assertResult(DeliveryResult.Reject)(run(handler, readAction, properties))
+
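+    // the dead-queue action must have fired exactly once, on the fifth (== maxAttempts) attempt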
+    assertResult(1)(movedCount.get())
+  }
+
+  test("pretend lower no. of attempts") {
+    def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = {
+      Task.now(Republish())
+    }
+
+    val movedCount = new AtomicInteger(0)
+
+    val handler = new DeadQueuePoisonedMessageHandler[Task, Bytes](5)({ (_, _, _) =>
+      Task.delay(movedCount.incrementAndGet())
+    })
+
+    val properties = (1 to 4).foldLeft(MessageProperties.empty) {
+      case (p, i) =>
+        run(handler, readAction, p) match {
+          case Republish(_, h) =>
+            if (i == 3) {
+              MessageProperties(headers = h + (RepublishCountHeaderName -> 1.asInstanceOf[AnyRef]))
+            } else {
+              MessageProperties(headers = h)
+            }
+
+          case _ => fail("unreachable")
+        }
+    }
+
+    // the final attempt count is only 2, because the programmer overrode the header and said so ;-)
+    assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 2.asInstanceOf[AnyRef])))(properties)
+
+    assertResult(0)(movedCount.get())
+  }
+
+  def run(handler: PoisonedMessageHandler[Task, Bytes],
+          readAction: Delivery[Bytes] => Task[DeliveryResult],
+          properties: MessageProperties): DeliveryResult = {
+    val delivery = Delivery(Bytes.empty(), properties, "")
+
+    readAction(delivery).flatMap {
+      handler.interceptResult(delivery, MessageId("msg-id"), Bytes.empty())
+    }
+  }.runSyncUnsafe()
+}
diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/RepublishStrategyTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/RepublishStrategyTest.scala
index c6b83c54..27e2dc25 100644
--- a/core/src/test/scala/com/avast/clients/rabbitmq/RepublishStrategyTest.scala
+++ b/core/src/test/scala/com/avast/clients/rabbitmq/RepublishStrategyTest.scala
@@ -1,10 +1,11 @@
 package com.avast.clients.rabbitmq
 
-import java.util.UUID
-
+import com.avast.bytes.Bytes
 import com.avast.clients.rabbitmq.RabbitMQConnection.DefaultListeners
 import com.avast.clients.rabbitmq.api.DeliveryResult
-import com.avast.metrics.scalaapi.Monitor
+import com.avast.clients.rabbitmq.api.DeliveryResult.Republish
+import com.avast.clients.rabbitmq.logging.ImplicitContextLogger
+import com.avast.metrics.scalaeffectapi.Monitor
 import com.rabbitmq.client.AMQP.BasicProperties
 import com.rabbitmq.client.Envelope
 import com.rabbitmq.client.impl.recovery.AutorecoveringChannel
@@ -13,9 +14,12 @@ import monix.execution.Scheduler.Implicits.global
 import org.mockito.Mockito._
 import org.mockito.{ArgumentCaptor, Matchers}
 import org.scalatest.time.{Seconds, Span}
+import org.slf4j.event.Level
 
-import scala.jdk.CollectionConverters._
+import java.util.UUID
 import scala.collection.immutable
+import scala.concurrent.duration.DurationInt
+import scala.jdk.CollectionConverters._
 import scala.util.Random
 
 class RepublishStrategyTest extends TestBase {
@@ -36,21 +40,11 @@ class RepublishStrategyTest extends TestBase {
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val consumer = new DefaultRabbitMQConsumer[Task](
-      "test",
-      channel,
-      "queueName",
-      connectionInfo,
-      Monitor.noOp,
-      DeliveryResult.Reject,
-      DefaultListeners.DefaultConsumerListener,
-      RepublishStrategy.DefaultExchange,
-      TestBase.testBlocker
-    )({ delivery =>
+    val consumer = newConsumer(channel, RepublishStrategy.DefaultExchange[Task]()) { delivery =>
       assertResult(Some(messageId))(delivery.properties.messageId)
 
       Task.now(DeliveryResult.Republish())
-    })
+    }
 
     val body = Random.nextString(5).getBytes
     consumer.handleDelivery("abcd", envelope, properties, body)
@@ -80,21 +74,11 @@ class RepublishStrategyTest extends TestBase {
     val channel = mock[AutorecoveringChannel]
     when(channel.isOpen).thenReturn(true)
 
-    val consumer = new DefaultRabbitMQConsumer[Task](
-      "test",
-      channel,
-      "queueName",
-      connectionInfo,
-      Monitor.noOp,
-      DeliveryResult.Reject,
-      DefaultListeners.DefaultConsumerListener,
-      RepublishStrategy.CustomExchange("myCustomExchange"),
-      TestBase.testBlocker
-    )({ delivery =>
+    val consumer = newConsumer(channel, RepublishStrategy.CustomExchange("myCustomExchange")) { delivery =>
       assertResult(Some(messageId))(delivery.properties.messageId)
 
      Task.now(DeliveryResult.Republish())
-    })
+    }
 
     val body = Random.nextString(5).getBytes
     consumer.handleDelivery("abcd", envelope, properties, body)
@@ -112,4 +96,33 @@
assertResult(Some(originalUserId))(propertiesCaptor.getValue.getHeaders.asScala.get(DefaultRabbitMQConsumer.RepublishOriginalUserId)) } } + + private def newConsumer(channel: ServerChannel, republishStrategy: RepublishStrategy[Task])( + userAction: DeliveryReadAction[Task, Bytes]): DefaultRabbitMQConsumer[Task, Bytes] = { + val base = new ConsumerBase[Task, Bytes]("test", "queueName", TestBase.testBlocker, ImplicitContextLogger.createLogger, Monitor.noOp()) + + val channelOps = new ConsumerChannelOps[Task, Bytes]( + "test", + "queueName", + channel, + TestBase.testBlocker, + republishStrategy, + PMH, + connectionInfo, + ImplicitContextLogger.createLogger, + Monitor.noOp() + ) + + new DefaultRabbitMQConsumer[Task, Bytes]( + base, + channelOps, + 10.seconds, + DeliveryResult.Republish(), + Level.ERROR, + Republish(), + DefaultListeners.defaultConsumerListener, + )(userAction) + } + + object PMH extends LoggingPoisonedMessageHandler[Task, Bytes](3) } diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/StreamingConsumerLiveTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/StreamingConsumerLiveTest.scala new file mode 100644 index 00000000..a998e832 --- /dev/null +++ b/core/src/test/scala/com/avast/clients/rabbitmq/StreamingConsumerLiveTest.scala @@ -0,0 +1,479 @@ +package com.avast.clients.rabbitmq + +import com.avast.bytes.Bytes +import com.avast.clients.rabbitmq.api.DeliveryResult._ +import com.avast.clients.rabbitmq.api._ +import com.avast.metrics.scalaeffectapi.Monitor +import com.typesafe.config._ +import monix.eval.Task +import monix.execution.Scheduler +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.time._ + +import java.util.concurrent._ +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.ExecutionContext +import scala.concurrent.duration._ +import scala.jdk.CollectionConverters._ +import scala.util.Random + +class StreamingConsumerLiveTest extends TestBase with ScalaFutures { + import pureconfig._ + + def randomString(length: Int): String = { + Random.alphanumeric.take(length).mkString("") + } + + private implicit val cidStrategy: CorrelationIdStrategy = CorrelationIdStrategy.RandomNew + + private implicit val p: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds)) + + private lazy val testHelper = new TestHelper(System.getProperty("rabbit.host", System.getenv("rabbit.host")), + System.getProperty("rabbit.tcp.15672", System.getenv("rabbit.tcp.15672")).toInt) + + //noinspection ScalaStyle + private def createConfig() = new { + val queueName1: String = randomString(4) + "_QU1" + val queueName2: String = randomString(4) + "_QU2" + val exchange1: String = randomString(4) + "_EX1" + val exchange2: String = randomString(4) + "_EX2" + val exchange4: String = randomString(4) + "_EX4" + val exchange5: String = randomString(4) + "_EX5" + + testHelper.queue.delete(queueName1) + testHelper.queue.delete(queueName2) + + private val original = ConfigFactory.load().getConfig("myConfig") + + val bindConfigs: Array[Config] = original.getObjectList("consumers.testing.bindings").asScala.map(_.toConfig).toArray + bindConfigs(0) = bindConfigs(0).withValue("exchange.name", ConfigValueFactory.fromAnyRef(exchange1)) + bindConfigs(1) = bindConfigs(1).withValue("exchange.name", ConfigValueFactory.fromAnyRef(exchange2)) + + val config: Config = original + .withValue("republishStrategy.exchangeName", ConfigValueFactory.fromAnyRef(exchange5)) + .withValue("consumers.testingStreaming.queueName", ConfigValueFactory.fromAnyRef(queueName1)) + 
.withValue("consumers.testingStreaming.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) + .withValue("consumers.testingStreamingWithTimeout.queueName", ConfigValueFactory.fromAnyRef(queueName1)) + .withValue("consumers.testingStreamingWithTimeout.bindings", ConfigValueFactory.fromIterable(bindConfigs.toSeq.map(_.root()).asJava)) + .withValue("producers.testing.exchange", ConfigValueFactory.fromAnyRef(exchange1)) + .withValue("producers.testing2.exchange", ConfigValueFactory.fromAnyRef(exchange2)) + .withValue("producers.testing3.exchange", ConfigValueFactory.fromAnyRef(exchange4)) + + val ex: ExecutorService = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool()) + + implicit val sched: Scheduler = Scheduler( + Executors.newScheduledThreadPool(4), + ExecutionContext.fromExecutor(new ForkJoinPool()) + ) + } + + test("streaming consumer") { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val count = Random.nextInt(50000) + 50000 // random 50 - 100k messages + + logger.info(s"Sending $count messages") + + val latch = new CountDownLatch(count + 10000) // explanation below + + val d = new AtomicInteger(0) + + rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => + val stream = cons.deliveryStream + .parEvalMapUnordered(50) { + _.handleWith { del => + Task.delay(d.incrementAndGet()).flatMap { n => + Task { + latch.countDown() + + if (n <= (count - 10000) || n > count) Ack + else { + if (n <= (count - 5000)) Retry else Republish() + } + + // ^ example: 100000 messages in total => 6500 * Ack, 5000 * Retry, 5000 * Republish => processing 110000 (== +10000) messages in total + } + } + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to count) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + // it takes some time before the stats appear... 
:-| + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { + assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) + } + + sched.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream + + eventually(timeout(Span(4, Minutes)), interval(Span(1, Seconds))) { + println("D: " + d.get()) + assertResult(count + 10000)(d.get()) + assertResult(true)(latch.await(1000, TimeUnit.MILLISECONDS)) + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + } + } + } + } + } + + test("streaming consumers to single queue") { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val count = Random.nextInt(10000) + 10000 // random 10k - 20k messages + + logger.info(s"Sending $count messages") + + val latch = new CountDownLatch(count) + + def toStream(cons1: RabbitMQStreamingConsumer[Task, Bytes], d: AtomicInteger): fs2.Stream[Task, Unit] = { + cons1.deliveryStream + .parEvalMapUnordered(20) { + _.handleWith { _ => + Task.delay(d.incrementAndGet()).flatMap { n => + Task.sleep((if (n % 500 == 0) Random.nextInt(100) else 0).millis) >> // random slowdown 0-100 ms for every 500th message + Task { + latch.countDown() + + Ack + } + } + } + } + } + + rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons1 => + rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons2 => + val d1 = new AtomicInteger(0) + val d2 = new AtomicInteger(0) + + val stream1 = toStream(cons1, d1) + val stream2 = toStream(cons2, d2) + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to count) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + // it takes some time before the stats appear... 
:-| + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { + assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) + } + + ex.execute(() => stream1.compile.drain.runSyncUnsafe()) // run the stream + ex.execute(() => stream2.compile.drain.runSyncUnsafe()) // run the stream + + eventually(timeout(Span(5, Minutes)), interval(Span(5, Seconds))) { + val inQueue = testHelper.queue.getMessagesCount(queueName1) + println(s"D: ${d1.get}/${d2.get()}, IN QUEUE $inQueue") + assertResult(count)(d1.get() + d2.get()) + assert(d1.get() > 0) + assert(d2.get() > 0) + println("LATCH: " + latch.getCount) + assertResult(true)(latch.await(1000, TimeUnit.MILLISECONDS)) + assertResult(0)(inQueue) + } + } + } + } + } + } + + test("streaming consumer stream doesn't fail with failed result") { + for (_ <- 1 to 5) { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val count = Random.nextInt(5000) + 5000 // random 5k - 10k messages + + val nth = 150 + + logger.info(s"Sending $count messages") + + val d = new AtomicInteger(0) + + rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => + def stream: fs2.Stream[Task, Unit] = + cons.deliveryStream + .evalMap { + _.handleWith { _ => + Task + .delay(d.incrementAndGet()) + .flatMap { n => + if (n % nth != 0) Task.now(Ack) + else { + Task.raiseError(new RuntimeException(s"My failure $n")) + } + // ^^ cause failure for every nth message + } + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to count) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + // it takes some time before the stats appear... :-| + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { + assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) + } + + ex.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream + + eventually(timeout(Span(5, Minutes)), interval(Span(1, Seconds))) { + println("D: " + d.get()) + assert(d.get() > count) // can't say exact number, number of redeliveries is unpredictable + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + } + } + } + } + } + } + + test("streaming consumer stream doesn't fail with thrown exception") { + for (_ <- 1 to 5) { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val count = Random.nextInt(5000) + 5000 // random 5k - 10k messages + + val nth = 150 + + logger.info(s"Sending $count messages") + + val d = new AtomicInteger(0) + + rabbitConnection.newStreamingConsumer[Bytes]("testingStreaming", Monitor.noOp()).withResource { cons => + def stream: fs2.Stream[Task, Unit] = + cons.deliveryStream + .evalMap { + _.handleWith { _ => + Task + .delay(d.incrementAndGet()) + .flatMap { n => + if (n % nth != 0) Task.now(Ack) + else { + throw new RuntimeException(s"My failure $n") + } + // ^^ cause failure for every nth message + } + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to count) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + // it takes some time before the stats appear... 
:-| + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { + assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) + } + + ex.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream + + eventually(timeout(Span(5, Minutes)), interval(Span(1, Seconds))) { + println("D: " + d.get()) + assert(d.get() > count) // can't say exact number, number of redeliveries is unpredictable + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + } + } + } + } + } + } + + test("streaming consumer timeouts") { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + val count = 100 + + logger.info(s"Sending $count messages") + + val d = new AtomicInteger(0) + + rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", Monitor.noOp()).withResource { cons => + val stream = cons.deliveryStream + .mapAsyncUnordered(50) { + _.handleWith { _ => + Task.delay(d.incrementAndGet()) >> + Task + .now(Ack) + .delayExecution(800.millis) + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to count) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + // it takes some time before the stats appear... :-| + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { + assertResult(count)(testHelper.queue.getPublishedCount(queueName1)) + } + + ex.execute(() => stream.compile.drain.runSyncUnsafe()) // run the stream + + eventually(timeout(Span(30, Seconds)), interval(Span(1, Seconds))) { + println("D: " + d.get()) + assert(d.get() > count + 200) // more than sent messages + assert(testHelper.exchange.getPublishedCount(exchange5) > 0) + } + } + } + } + } + + test("can be closed properly") { + val c = createConfig() + import c._ + + // single stream + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", Monitor.noOp()).withResource { cons => + val stream = cons.deliveryStream + .evalMap { + _.handleWith { _ => + Task.now(Ack) + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to 50) sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + + stream.take(1).compile.drain.runSyncUnsafe() + } + } + + // two streams + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", Monitor.noOp()).withResource { cons => + def createStream(): fs2.Stream[Task, Unit] = + cons.deliveryStream + .evalMap { + _.handleWith { _ => + Task.now(Ack) + } + } + + rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender => + for (_ <- 1 to 50) { + sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await + } + } + + val stream1 = createStream().take(1).compile.drain.start + val stream2 = createStream().take(1).compile.drain.start + + Task.map2(stream1, stream2)((a, b) => a.join >> b.join).flatten.await(20.seconds) + } + } + + // ok + } + + test("releases buffered messages after stream finalization") { + val c = createConfig() + import c._ + + val monitor = new TestMonitor[Task] + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", monitor).withResource { cons => + def createStream(): 
fs2.Stream[Task, Unit] =
+            cons.deliveryStream
+              .evalMap {
+                _.handleWith { d =>
+                  logger.debug(s"Processing $d")
+                  Task.now(Ack)
+                }
+              }
+
+        rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+          for (_ <- 1 to 10) {
+            sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await
+          }
+        }
+
+        // This should buffer all the 10 messages, but process only the first one.
+        // When the stream is closed, it has to immediately release the unprocessed messages.
+        createStream().take(1).compile.drain.await
+
+        Thread.sleep(1000) // the timeout is 500 ms, so we give it a chance to time out and check later that it didn't happen
+
+        // As one message should have been already processed, only 9 should remain - and we will count them.
+        val processedFromRest = new AtomicInteger(0)
+
+        createStream().map(_ => processedFromRest.incrementAndGet()).compile.drain.startAndForget.await // run asynchronously
+
+        eventually(timeout(Span(5, Seconds)), interval(Span(0.2, Seconds))) {
+          println("D: " + processedFromRest.get())
+          assertResult(9)(processedFromRest.get())
+          assertResult(0)(testHelper.queue.getMessagesCount(queueName1))
+        }
+
+        // ensure the deliveries released from buffer (after the first consumer closes) don't time-out
+        assertResult(0)(monitor.registry.meterCount("timeouts"))
+
+        assertResult(10)(monitor.registry.meterCount("results.ack"))
+        assertResult(0)(monitor.registry.meterCount("results.reject"))
+        assertResult(0)(monitor.registry.meterCount("results.retry"))
+        assertResult(0)(monitor.registry.meterCount("results.republish"))
+      }
+    }
+  }
+
+  test("always finishes the ACK before stream is discarded") {
+    val c = createConfig()
+    import c._
+
+    for (_ <- 1 to 500) {
+      RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection =>
+        val monitor = new TestMonitor[Task]
+        rabbitConnection.newStreamingConsumer[Bytes]("testingStreamingWithTimeout", monitor).withResource { cons =>
+          def createStream(): fs2.Stream[Task, Unit] =
+            cons.deliveryStream
+              .evalMap {
+                _.handleWith { d =>
+                  logger.debug(s"Processing $d")
+                  Task.now(Ack)
+                }
+              }
+
+          rabbitConnection.newProducer[Bytes]("testing", Monitor.noOp()).withResource { sender =>
+            // send a single message
+            sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await
+          }
+
+          createStream().take(1).compile.drain.await
+
+          eventually(timeout(Span(5, Seconds)), interval(Span(0.2, Seconds))) {
+            assertResult(0)(testHelper.queue.getMessagesCount(queueName1))
+
+            assertResult(1)(monitor.registry.meterCount("results.ack"))
+            assertResult(0)(monitor.registry.meterCount("results.reject"))
+            assertResult(0)(monitor.registry.meterCount("results.retry"))
+            assertResult(0)(monitor.registry.meterCount("results.republish"))
+
+            assertResult(0)(monitor.registry.meterCount("timeouts"))
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/TestBase.scala b/core/src/test/scala/com/avast/clients/rabbitmq/TestBase.scala
index e80fc678..15415b08 100644
--- a/core/src/test/scala/com/avast/clients/rabbitmq/TestBase.scala
+++ b/core/src/test/scala/com/avast/clients/rabbitmq/TestBase.scala
@@ -1,7 +1,6 @@
 package com.avast.clients.rabbitmq
 
-import java.util.concurrent.Executors
-
-import cats.effect.{Blocker, IO, Resource}
+import cats.effect.{Blocker, ContextShift, IO, Resource, Timer}
+import cats.implicits.catsSyntaxFlatMapOps
 import com.typesafe.scalalogging.StrictLogging
 import monix.eval.Task
 import monix.execution.Scheduler
@@ -13,13 +12,15 @@ import org.scalatest.concurrent.Eventually
import org.scalatestplus.junit.JUnitRunner import org.scalatestplus.mockito.MockitoSugar -import scala.concurrent.TimeoutException +import java.util.concurrent.Executors import scala.concurrent.duration._ -import scala.language.{higherKinds, implicitConversions} +import scala.concurrent.{ExecutionContext, TimeoutException} +import scala.language.implicitConversions @RunWith(classOf[JUnitRunner]) class TestBase extends FunSuite with MockitoSugar with Eventually with StrictLogging { protected implicit def taskToOps[A](t: Task[A]): TaskOps[A] = new TaskOps[A](t) + protected implicit def IOToOps[A](t: IO[A]): IOOps[A] = new IOOps[A](t) protected implicit def resourceToIOOps[A](t: Resource[IO, A]): ResourceIOOps[A] = new ResourceIOOps[A](t) protected implicit def resourceToTaskOps[A](t: Resource[Task, A]): ResourceTaskOps[A] = new ResourceTaskOps[A](t) } @@ -34,6 +35,14 @@ class TaskOps[A](t: Task[A]) { def await: A = await(10.seconds) } +class IOOps[A](t: IO[A]) { + private implicit val cs: ContextShift[IO] = IO.contextShift(TestBase.testBlockingScheduler) + private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) + + def await(duration: FiniteDuration): A = (cs.shift >> t.timeout(duration)).unsafeRunSync() + def await: A = await(10.seconds) +} + class ResourceIOOps[A](val r: Resource[IO, A]) extends AnyVal { def withResource[B](f: A => B): B = { withResource(f, Duration.Inf) diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/TestDeliveryContext.scala b/core/src/test/scala/com/avast/clients/rabbitmq/TestDeliveryContext.scala new file mode 100644 index 00000000..c56778fa --- /dev/null +++ b/core/src/test/scala/com/avast/clients/rabbitmq/TestDeliveryContext.scala @@ -0,0 +1,21 @@ +package com.avast.clients.rabbitmq + +import com.rabbitmq.client.AMQP.BasicProperties + +import scala.util.Random + +object TestDeliveryContext { + private def randomString(length: Int): String = { + Random.alphanumeric.take(length).mkString("") + } + + def create(): DeliveryContext = { + DeliveryContext( + messageId = MessageId("test-message-id"), + correlationId = Some(CorrelationId("test-corr-id")), + deliveryTag = DeliveryTag(42), + routingKey = RoutingKey(randomString(10)), + fixedProperties = new BasicProperties() + ) + } +} diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/TestHelper.scala b/core/src/test/scala/com/avast/clients/rabbitmq/TestHelper.scala index 86aa9f96..c982f0e5 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/TestHelper.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/TestHelper.scala @@ -22,9 +22,6 @@ class TestHelper(host: String, port: Int) { val resp = Http(s"$RootUri/queues/%2f/$encoded").auth("guest", "guest").asString.body - println("MESSAGES COUNT:") - println(resp) - decode[QueueProperties](resp) match { case Right(p) => p.messages case r => throw new IllegalStateException(s"Wrong response $r") @@ -36,9 +33,6 @@ class TestHelper(host: String, port: Int) { val resp = Http(s"$RootUri/queues/%2f/$encoded").auth("guest", "guest").asString.body - println("PUBLISHED COUNT:") - println(resp) - decode[QueueProperties](resp) match { case Right(p) => p.message_stats.map(_.publish).getOrElse { @@ -101,9 +95,6 @@ class TestHelper(host: String, port: Int) { val resp = Http(s"$RootUri/exchanges/%2f/$encoded").auth("guest", "guest").asString.body - println("PUBLISHED COUNT:") - println(resp) - decode[ExchangeProperties](resp) match { case Right(p) => p.message_stats.map(_.publish_in).getOrElse { diff --git 
a/core/src/test/scala/com/avast/clients/rabbitmq/TestMonitor.scala b/core/src/test/scala/com/avast/clients/rabbitmq/TestMonitor.scala new file mode 100644 index 00000000..7124113b --- /dev/null +++ b/core/src/test/scala/com/avast/clients/rabbitmq/TestMonitor.scala @@ -0,0 +1,39 @@ +package com.avast.clients.rabbitmq + +import cats.effect.Sync +import com.avast.metrics.api.Naming +import com.avast.metrics.dropwizard.MetricsMonitor +import com.avast.metrics.scalaapi +import com.avast.metrics.scalaeffectapi._ +import com.codahale.metrics.MetricRegistry + +import scala.jdk.CollectionConverters._ + +class TestMonitor[F[_]: Sync] extends Monitor[F] { + + private val metricsRegistry = new MetricRegistry + val underlying: Monitor[F] = Monitor.wrapJava[F](new MetricsMonitor(metricsRegistry, Naming.defaultNaming())) + + override def named(name: String): Monitor[F] = underlying.named(name) + override def named(name1: String, name2: String, restOfNames: String*): Monitor[F] = underlying.named(name1, name2, restOfNames: _*) + override def meter(name: String): Meter[F] = underlying.meter(name) + override def counter(name: String): Counter[F] = underlying.counter(name) + override def histogram(name: String): Histogram[F] = underlying.histogram(name) + override def timer(name: String): Timer[F] = underlying.timer(name) + override def timerPair(name: String): TimerPair[F] = underlying.timerPair(name) + override def gauge: GaugeFactory[F] = underlying.gauge + override def asPlainScala: scalaapi.Monitor = underlying.asPlainScala + override def asJava: com.avast.metrics.api.Monitor = underlying.asJava + override def getName: String = underlying.getName + + override def close(): Unit = underlying.close() + + val registry: Registry = new Registry(metricsRegistry) +} + +class Registry(registry: MetricRegistry) { + def meterCount(path: String): Long = registry.getMeters.asScala(path.replace('.', '/')).getCount + def timerCount(path: String): Long = registry.getTimers.asScala(path.replace('.', '/')).getCount + def timerPairCountSuccesses(path: String): Long = registry.getTimers.asScala(path.replace('.', '/') + "Successes").getCount + def timerPairCountFailures(path: String): Long = registry.getTimers.asScala(path.replace('.', '/') + "Failures").getCount +} diff --git a/extras-cactus/README.md b/extras-cactus/README.md deleted file mode 100644 index 676f81b5..00000000 --- a/extras-cactus/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# RabbitMQ client extras - Cactus - -This is an extra module with some optional functionality dependent on [Cactus](https://github.com/avast/cactus) (library for converting -between GPBs and Scala case classes). -```groovy -compile 'com.avast.clients.rabbitmq:rabbitmq-client-extras-cactus_$scalaVersion:x.x.x' -``` - -## GpbDeliveryConverter - -This is an implementation of [DeliveryConverter](../core/src/main/scala/com/avast/clients/rabbitmq/converters.scala) which adds support -for GPB decoding done by [Cactus](https://github.com/avast/cactus). - -The suitability of the converter for concrete message is decided based on Content-Type property - `application/protobuf` and -`application/x-protobuf` are supported. - -See [Providing converters](../README.md#providing-converters-for-producer/consumer) and [MultiFormatConsumer](../README.md#multiformatconsumer) -description for usage. 
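For context on how the new tests read their metrics back: the `TestMonitor` introduced above funnels everything into an in-memory Dropwizard `MetricRegistry`, and its `Registry` helper looks names up with dots translated to the '/' separator of the registry keys. A minimal usage sketch follows; it is illustrative only and assumes that `Meter[F]` from `scalaeffectapi` exposes `mark: F[Unit]` and that the default naming joins `named`/`meter` segments with '/':

```scala
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

val monitor = new TestMonitor[Task]

// Mark a meter through the effectful API; under the assumed naming,
// the underlying Dropwizard registry stores it as "results/ack".
monitor.named("results").meter("ack").mark.runSyncUnsafe()

// The Registry helper accepts the dotted form and translates it to '/'.
assert(monitor.registry.meterCount("results.ack") == 1L)
```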
diff --git a/extras-cactus/build.gradle b/extras-cactus/build.gradle deleted file mode 100644 index 66564ca3..00000000 --- a/extras-cactus/build.gradle +++ /dev/null @@ -1,10 +0,0 @@ -archivesBaseName = "rabbitmq-client-extras-cactus_$scalaVersion" - -dependencies { - api project(":core") - api project(":extras") - - api "com.avast.cactus:cactus-common_$scalaVersion:$cactusVersion" - compileOnly "com.google.protobuf:protobuf-java:$protobufVersion" - api "com.avast.bytes:bytes-gpb:${bytesVersion}" -} diff --git a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbDeliveryConverter.scala b/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbDeliveryConverter.scala deleted file mode 100644 index 6533e4b6..00000000 --- a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbDeliveryConverter.scala +++ /dev/null @@ -1,54 +0,0 @@ -package com.avast.clients.rabbitmq.extras.format - -import cats.syntax.either._ -import com.avast.bytes.Bytes -import com.avast.cactus.CactusParser._ -import com.avast.cactus.Converter -import com.avast.clients.rabbitmq.CheckedDeliveryConverter -import com.avast.clients.rabbitmq.api.{ConversionException, Delivery} -import com.google.protobuf.MessageLite - -import scala.annotation.implicitNotFound -import scala.reflect.ClassTag -import scala.util.control.NonFatal -import scala.util.{Failure, Success} - -@implicitNotFound( - "Could not generate GpbDeliveryConverter from ${GpbMessage} to ${A}, try to import or define some\nMaybe you're missing some Cactus imports?") -trait GpbDeliveryConverter[GpbMessage, A] extends CheckedDeliveryConverter[A] - -object GpbDeliveryConverter { - final val ContentTypes: Set[String] = Set("application/protobuf", "application/x-protobuf") - - def apply[GpbMessage <: MessageLite]: GpbDeliveryConverterDerivator[GpbMessage] = new GpbDeliveryConverterDerivator[GpbMessage] { - override def derive[A: GpbDeliveryConverter[GpbMessage, ?]](): GpbDeliveryConverter[GpbMessage, A] = - implicitly[GpbDeliveryConverter[GpbMessage, A]] - } - - trait GpbDeliveryConverterDerivator[GpbMessage <: MessageLite] { - def derive[A: GpbDeliveryConverter[GpbMessage, ?]](): GpbDeliveryConverter[GpbMessage, A] - } - - implicit def createGpbDeliveryConverter[GpbMessage <: MessageLite: GpbParser: Converter[?, A]: ClassTag, A: ClassTag] - : GpbDeliveryConverter[GpbMessage, A] = new GpbDeliveryConverter[GpbMessage, A] { - override def convert(body: Bytes): Either[ConversionException, A] = { - implicitly[GpbParser[GpbMessage]].parseFrom(body) match { - case Success(gpb) => - gpb - .asCaseClass[A] - .leftMap { fs => - ConversionException { - s"Errors while converting to ${implicitly[ClassTag[A]].runtimeClass.getName}: ${fs.toList.mkString("[", ", ", "]")}" - } - } - case Failure(NonFatal(e)) => - Left { - ConversionException(s"Could not parse GPB message ${implicitly[ClassTag[GpbMessage]].runtimeClass.getName}", e) - } - } - } - - override def canConvert(d: Delivery[Bytes]): Boolean = d.properties.contentType.map(_.toLowerCase).exists(ContentTypes.contains) - } - -} diff --git a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbParser.scala b/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbParser.scala deleted file mode 100644 index 6051a713..00000000 --- a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbParser.scala +++ /dev/null @@ -1,21 +0,0 @@ -package com.avast.clients.rabbitmq.extras.format - -import com.avast.bytes.Bytes -import 
com.google.protobuf.{MessageLite, Parser} - -import scala.reflect.ClassTag -import scala.util.Try - -trait GpbParser[A <: MessageLite] { - def parseFrom(bytes: Bytes): Try[A] -} - -object GpbParser { - implicit def parserForGpb[Gpb <: MessageLite: ClassTag]: GpbParser[Gpb] = { - val gpbClass = implicitly[ClassTag[Gpb]].runtimeClass - val parser = gpbClass.getMethod("getDefaultInstance").invoke(gpbClass).asInstanceOf[Gpb].getParserForType.asInstanceOf[Parser[Gpb]] - - (bytes: Bytes) => - Try(parser.parseFrom(bytes.newInputStream())) - } -} diff --git a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbProductConverter.scala b/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbProductConverter.scala deleted file mode 100644 index d8360d0e..00000000 --- a/extras-cactus/src/main/scala/com/avast/clients/rabbitmq/extras/format/GpbProductConverter.scala +++ /dev/null @@ -1,60 +0,0 @@ -package com.avast.clients.rabbitmq.extras.format - -import cats.syntax.either._ -import com.avast.bytes.Bytes -import com.avast.bytes.gpb.ByteStringBytes -import com.avast.cactus.CactusParser._ -import com.avast.cactus.Converter -import com.avast.clients.rabbitmq.ProductConverter -import com.avast.clients.rabbitmq.api.{ConversionException, MessageProperties} -import com.google.protobuf.MessageLite - -import scala.annotation.implicitNotFound -import scala.reflect.ClassTag -import scala.util.control.NonFatal - -@implicitNotFound( - "Could not generate GpbProductConverter from {$GpbMessage} to ${A}, try to import or define some\nMaybe you're missing some Cactus imports?") -trait GpbProductConverter[GpbMessage, A] extends ProductConverter[A] - -object GpbProductConverter { - def apply[GpbMessage <: MessageLite]: GpbProductConverterDerivator[GpbMessage] = new GpbProductConverterDerivator[GpbMessage] { - override def derive[A: GpbProductConverter[GpbMessage, ?]](): GpbProductConverter[GpbMessage, A] = - implicitly[GpbProductConverter[GpbMessage, A]] - } - - trait GpbProductConverterDerivator[GpbMessage <: MessageLite] { - def derive[A: GpbProductConverter[GpbMessage, ?]](): GpbProductConverter[GpbMessage, A] - } - - implicit def createGpbDeliveryConverter[GpbMessage <: MessageLite: Converter[A, ?]: ClassTag, A: ClassTag] - : GpbProductConverter[GpbMessage, A] = new GpbProductConverter[GpbMessage, A] { - override def convert(p: A): Either[ConversionException, Bytes] = { - try { - p.asGpb[GpbMessage] - .map(gpb => ByteStringBytes.wrap(gpb.toByteString)) - .leftMap { fs => - ConversionException { - s"Errors while converting ${implicitly[ClassTag[A]].runtimeClass.getName} to " + - s"GPB ${implicitly[ClassTag[GpbMessage]].runtimeClass.getName}: ${fs.toList - .mkString("[", ", ", "]")}" - } - } - } catch { - case NonFatal(e) => - Left { - ConversionException( - s"Could not convert ${implicitly[ClassTag[A]].runtimeClass.getName} to GPB ${implicitly[ClassTag[GpbMessage]].runtimeClass.getName}", - e - ) - } - } - } - - override def fillProperties(properties: MessageProperties): MessageProperties = { - properties.copy( - contentType = Some("application/protobuf") - ) - } - } -} diff --git a/extras-protobuf/build.gradle b/extras-protobuf/build.gradle index 1407ab10..6e7c018c 100644 --- a/extras-protobuf/build.gradle +++ b/extras-protobuf/build.gradle @@ -5,7 +5,7 @@ archivesBaseName = "rabbitmq-client-extras-protobuf_$scalaVersion" dependencies { api project(":core") - api "com.avast.bytes:bytes-gpb:${bytesVersion}" + api "com.avast.bytes:bytes-gpb:$bytesVersion" api 
"com.google.protobuf:protobuf-java:$protobufVersion" api "com.google.protobuf:protobuf-java-util:$protobufVersion" diff --git a/extras-protobuf/src/main/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumer.scala b/extras-protobuf/src/main/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumer.scala index 182d250e..66d8e672 100644 --- a/extras-protobuf/src/main/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumer.scala +++ b/extras-protobuf/src/main/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumer.scala @@ -1,16 +1,15 @@ package com.avast.clients.rabbitmq.extras.format +import cats.effect.Sync import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.MultiFormatConsumer import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult} import com.google.protobuf.GeneratedMessageV3 -import scala.language.higherKinds import scala.reflect.ClassTag object ProtobufConsumer { - def create[F[_], A <: GeneratedMessageV3: ClassTag](action: Delivery[A] => F[DeliveryResult]): (Delivery[Bytes] => F[DeliveryResult]) = - MultiFormatConsumer.forType[F, A]( - ProtobufAsJsonDeliveryConverter.derive(), - ProtobufAsBinaryDeliveryConverter.derive())(action) + def create[F[_]: Sync, A <: GeneratedMessageV3: ClassTag]( + action: Delivery[A] => F[DeliveryResult]): (Delivery[Bytes] => F[DeliveryResult]) = + MultiFormatConsumer.forType[F, A](ProtobufAsJsonDeliveryConverter.derive(), ProtobufAsBinaryDeliveryConverter.derive())(action) } diff --git a/extras-protobuf/src/test/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumerTest.scala b/extras-protobuf/src/test/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumerTest.scala index 8dc6b736..8703cb9b 100644 --- a/extras-protobuf/src/test/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumerTest.scala +++ b/extras-protobuf/src/test/scala/com/avast/clients/rabbitmq/extras/format/ProtobufConsumerTest.scala @@ -1,25 +1,32 @@ package com.avast.clients.rabbitmq.extras.format import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult, MessageProperties} +import com.avast.clients.rabbitmq.api._ import com.avast.clients.rabbitmq.test.ExampleEvents import com.google.protobuf.util.JsonFormat import monix.eval.Task +import monix.execution.Scheduler.Implicits.global import org.junit.runner.RunWith import org.scalatest.{FlatSpec, Matchers} import org.scalatestplus.junit.JUnitRunner -import monix.execution.Scheduler.Implicits.global @RunWith(classOf[JUnitRunner]) class ProtobufConsumerTest extends FlatSpec with Matchers { private val event = ExampleEvents.FileSource.newBuilder().setFileId("fileId").setSource("source").build() it must "consume JSON and binary events" in { - val consumer = ProtobufConsumer.create[Task, ExampleEvents.FileSource] { case Delivery.Ok(actual, _, _) => - actual shouldBe event - Task.pure(DeliveryResult.Ack) + val consumer = ProtobufConsumer.create[Task, ExampleEvents.FileSource] { + case Delivery.Ok(actual, _, _) => + actual shouldBe event + Task.pure(DeliveryResult.Ack) + + case _ => fail("malformed") } - consumer(Delivery.Ok(Bytes.copyFrom(event.toByteArray), MessageProperties.empty.copy(contentType = Some("application/protobuf")), "")).runSyncUnsafe() shouldBe DeliveryResult.Ack - consumer(Delivery.Ok(Bytes.copyFromUtf8(JsonFormat.printer().print(event)), MessageProperties.empty.copy(contentType = Some("application/json")), "")).runSyncUnsafe() shouldBe DeliveryResult.Ack + consumer(Delivery.Ok(Bytes.copyFrom(event.toByteArray), 
MessageProperties.empty.copy(contentType = Some("application/protobuf")), "")) + .runSyncUnsafe() shouldBe DeliveryResult.Ack + consumer( + Delivery.Ok(Bytes.copyFromUtf8(JsonFormat.printer().print(event)), + MessageProperties.empty.copy(contentType = Some("application/json")), + "")).runSyncUnsafe() shouldBe DeliveryResult.Ack } } diff --git a/extras-scalapb/build.gradle b/extras-scalapb/build.gradle index a4d45c80..06627b44 100644 --- a/extras-scalapb/build.gradle +++ b/extras-scalapb/build.gradle @@ -7,6 +7,7 @@ dependencies { api "com.avast.bytes:bytes-gpb:${bytesVersion}" + api "com.google.protobuf:protobuf-java:$protobufVersion" api "com.thesamet.scalapb:scalapb-runtime_$scalaVersion:$scalapbVersion" api "com.thesamet.scalapb:scalapb-json4s_$scalaVersion:$scalapbJson4sVersion" } diff --git a/extras-scalapb/src/main/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumer.scala b/extras-scalapb/src/main/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumer.scala index 1a54be2b..e5e4d503 100644 --- a/extras-scalapb/src/main/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumer.scala +++ b/extras-scalapb/src/main/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumer.scala @@ -1,16 +1,15 @@ package com.avast.clients.rabbitmq.extras.format +import cats.effect.Sync import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.MultiFormatConsumer import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult} import scalapb.{GeneratedMessage, GeneratedMessageCompanion} -import scala.language.higherKinds import scala.reflect.ClassTag object ScalaPBConsumer { - def create[F[_], A <: GeneratedMessage: GeneratedMessageCompanion: ClassTag](action: Delivery[A] => F[DeliveryResult]): (Delivery[Bytes] => F[DeliveryResult]) = - MultiFormatConsumer.forType[F, A]( - ScalaPBAsJsonDeliveryConverter.derive(), - ScalaPBAsBinaryDeliveryConverter.derive())(action) + def create[F[_]: Sync, A <: GeneratedMessage: GeneratedMessageCompanion: ClassTag]( + action: Delivery[A] => F[DeliveryResult]): (Delivery[Bytes] => F[DeliveryResult]) = + MultiFormatConsumer.forType[F, A](ScalaPBAsJsonDeliveryConverter.derive(), ScalaPBAsBinaryDeliveryConverter.derive())(action) } diff --git a/extras-scalapb/src/test/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumerTest.scala b/extras-scalapb/src/test/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumerTest.scala index 265d492c..bde038ec 100644 --- a/extras-scalapb/src/test/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumerTest.scala +++ b/extras-scalapb/src/test/scala/com/avast/clients/rabbitmq/extras/format/ScalaPBConsumerTest.scala @@ -1,13 +1,13 @@ package com.avast.clients.rabbitmq.extras.format import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult, MessageProperties} +import com.avast.clients.rabbitmq.api._ import com.avast.clients.rabbitmq.test.FileSource import monix.eval.Task +import monix.execution.Scheduler.Implicits.global import org.junit.runner.RunWith import org.scalatest.{FlatSpec, Matchers} import org.scalatestplus.junit.JUnitRunner -import monix.execution.Scheduler.Implicits.global import scalapb.json4s.Printer @RunWith(classOf[JUnitRunner]) @@ -15,11 +15,17 @@ class ScalaPBConsumerTest extends FlatSpec with Matchers { private val event = FileSource("fileId", "source") it must "consume JSON and binary events" in { - val consumer = ScalaPBConsumer.create[Task, FileSource] { case Delivery.Ok(actual, _, _) => - actual shouldBe event - 
Task.pure(DeliveryResult.Ack) + val consumer = ScalaPBConsumer.create[Task, FileSource] { + case Delivery.Ok(actual, _, _) => + actual shouldBe event + Task.pure(DeliveryResult.Ack) + + case _ => fail("malformed") } - consumer(Delivery.Ok(Bytes.copyFrom(event.toByteArray), MessageProperties.empty.copy(contentType = Some("application/protobuf")), "")).runSyncUnsafe() shouldBe DeliveryResult.Ack - consumer(Delivery.Ok(Bytes.copyFromUtf8(new Printer().print(event)), MessageProperties.empty.copy(contentType = Some("application/json")), "")).runSyncUnsafe() shouldBe DeliveryResult.Ack + consumer(Delivery.Ok(Bytes.copyFrom(event.toByteArray), MessageProperties.empty.copy(contentType = Some("application/protobuf")), "")) + .runSyncUnsafe() shouldBe DeliveryResult.Ack + consumer( + Delivery.Ok(Bytes.copyFromUtf8(new Printer().print(event)), MessageProperties.empty.copy(contentType = Some("application/json")), "")) + .runSyncUnsafe() shouldBe DeliveryResult.Ack } } diff --git a/extras/README.md b/extras/README.md index f1fc2f7a..a5880d8c 100644 --- a/extras/README.md +++ b/extras/README.md @@ -12,66 +12,3 @@ The library is not able to recover from all failures so it provides [HealthCheck class](src/main/scala/com/avast/clients/rabbitmq/extras/HealthCheck.scala) that indicates if the application is OK or not - then it should be restarted. To use that class, simply pass the `rabbitExceptionHandler` field as listener when constructing the RabbitMQ classes. Then you can call `getStatus` method. - -## Poisoned message handler - -It's quite often use-case we want to republish failed message but want to avoid the message to be republishing forever. Wrap your handler ( -readAction) -with [PoisonedMessageHandler](src/main/scala/com/avast/clients/rabbitmq/extras/PoisonedMessageHandler.scala) to solve this issue. It will -count no. of attempts and won't let the message to be republished again and again (above the limit you set). -_Note: it works ONLY for `Republish` and not for `Retry`!_ - -### Basic consumer - -```scala -val newReadAction = PoisonedMessageHandler[Task, MyDeliveryType](3)(myReadAction) -``` - -You can even pretend lower number of attempts when you want to rise the republishing count (for some special message): - -```scala -Republish(Map(PoisonedMessageHandler.RepublishCountHeaderName -> 1.asInstanceOf[AnyRef])) -``` - -Note you can provide your custom poisoned-message handle action: - -```scala -val newReadAction = PoisonedMessageHandler.withCustomPoisonedAction[Task, MyDeliveryType](3)(myReadAction) { delivery => - logger.warn(s"Delivery $delivery is poisoned!") - Task.unit -} -``` - -After the execution of the poisoned-message action (no matter whether default or custom one), the delivery is REJECTed. - -### Streaming consumer - -The usage and capabilities of poisoned message handler is very similar as for [basic consumer](#basic-consumer). - -You can either use it to wrap your "handle action": - -```scala -val cons: RabbitMQStreamingConsumer[IO, String] = ??? - -val poisonedHandler: fs2.Pipe[IO, StreamedDelivery[IO, String], StreamedResult] = StreamingPoisonedMessageHandler[IO, String](3) { delivery => - // do your stuff - delivery.handle(DeliveryResult.Ack) -} - -val stream: fs2.Stream[IO, StreamedResult] = cons.deliveryStream.through(poisonedHandler) -``` - -or have it as a transparent `Pipe`: - -```scala -val cons: RabbitMQStreamingConsumer[IO, String] = ??? 
- -val poisonedHandler: fs2.Pipe[IO, StreamedDelivery[IO, String], StreamedDelivery[IO, String]] = StreamingPoisonedMessageHandler.piped[IO, String](3) - -val deliveryStream: fs2.Stream[IO, StreamedDelivery[IO, String]] = cons.deliveryStream.through(poisonedHandler) - -val handleStream: fs2.Stream[IO, StreamedResult] = deliveryStream.evalMap { delivery => - // do your stuff - delivery.handle(DeliveryResult.Ack) -} -``` diff --git a/extras/build.gradle b/extras/build.gradle index 77d646ec..cd4ea29e 100644 --- a/extras/build.gradle +++ b/extras/build.gradle @@ -2,4 +2,6 @@ archivesBaseName = "rabbitmq-client-extras_$scalaVersion" dependencies { api project(":core") + + testImplementation project(":pureconfig") } diff --git a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/HealthCheck.scala b/extras/src/main/scala/com/avast/clients/rabbitmq/extras/HealthCheck.scala index 8decc607..6c8abdb8 100644 --- a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/HealthCheck.scala +++ b/extras/src/main/scala/com/avast/clients/rabbitmq/extras/HealthCheck.scala @@ -1,9 +1,12 @@ package com.avast.clients.rabbitmq.extras +import cats.effect.Sync +import cats.effect.concurrent.Ref +import cats.implicits.catsSyntaxFlatMapOps import com.avast.clients.rabbitmq.extras.HealthCheckStatus.{Failure, Ok} +import com.avast.clients.rabbitmq.logging.ImplicitContextLogger import com.avast.clients.rabbitmq.{ChannelListener, ConnectionListener, ConsumerListener} -import com.rabbitmq.client.{Channel, Connection, Consumer, ShutdownSignalException} -import com.typesafe.scalalogging.StrictLogging +import com.rabbitmq.client._ sealed trait HealthCheckStatus @@ -21,51 +24,52 @@ object HealthCheckStatus { * To use this class, simply pass the `rabbitExceptionHandler` as listener when constructing the RabbitMQ classes. * Then you can call `getStatus` method. 
*/ -class HealthCheck extends StrictLogging { +class HealthCheck[F[_]: Sync] { + private val F: Sync[F] = Sync[F] // scalastyle:ignore - // scalastyle:off - private var status: HealthCheckStatus = Ok - // scalastyle:on + private val logger = ImplicitContextLogger.createLogger[F, HealthCheck[F]] - def getStatus: HealthCheckStatus = status + private val ref = Ref.unsafe[F, HealthCheckStatus](Ok) - def fail(e: Throwable): Unit = { + def getStatus: F[HealthCheckStatus] = ref.get + + def fail(e: Throwable): F[Unit] = { val s = Failure(e.getClass.getName + ": " + e.getMessage, e) - logger.warn(s"Failing HealthCheck with '${s.msg}'", e) - status = s + ref.set(s) >> + logger.plainWarn(e)(s"Failing HealthCheck with '${s.msg}'") } - val rabbitExceptionHandler: ConnectionListener with ChannelListener with ConsumerListener = new ConnectionListener with ChannelListener - with ConsumerListener { - override def onRecoveryCompleted(connection: Connection): Unit = () + def rabbitExceptionHandler: ConnectionListener[F] with ChannelListener[F] with ConsumerListener[F] = + new ConnectionListener[F] with ChannelListener[F] with ConsumerListener[F] { + override def onRecoveryCompleted(connection: Connection): F[Unit] = F.unit - override def onRecoveryStarted(connection: Connection): Unit = () + override def onRecoveryStarted(connection: Connection): F[Unit] = F.unit - override def onRecoveryFailure(connection: Connection, failure: Throwable): Unit = fail(failure) + override def onRecoveryFailure(connection: Connection, failure: Throwable): F[Unit] = fail(failure) - override def onCreate(connection: Connection): Unit = () + override def onCreate(connection: Connection): F[Unit] = F.unit - override def onCreateFailure(failure: Throwable): Unit = fail(failure) + override def onCreateFailure(failure: Throwable): F[Unit] = fail(failure) - override def onRecoveryCompleted(channel: Channel): Unit = () + override def onRecoveryCompleted(channel: Channel): F[Unit] = F.unit - override def onRecoveryStarted(channel: Channel): Unit = () + override def onRecoveryStarted(channel: Channel): F[Unit] = F.unit - override def onRecoveryFailure(channel: Channel, failure: Throwable): Unit = fail(failure) + override def onRecoveryFailure(channel: Channel, failure: Throwable): F[Unit] = fail(failure) - override def onCreate(channel: Channel): Unit = () + override def onCreate(channel: Channel): F[Unit] = F.unit - override def onError(consumer: Consumer, consumerName: String, channel: Channel, failure: Throwable): Unit = fail(failure) + override def onError(consumer: Consumer, consumerName: String, channel: Channel, failure: Throwable): F[Unit] = fail(failure) - override def onShutdown(consumer: Consumer, - channel: Channel, - consumerName: String, - consumerTag: String, - cause: ShutdownSignalException): Unit = fail(cause) + override def onShutdown(consumer: Consumer, + channel: Channel, + consumerName: String, + consumerTag: String, + cause: ShutdownSignalException): F[Unit] = fail(cause) - override def onShutdown(connection: Connection, cause: ShutdownSignalException): Unit = fail(cause) + override def onShutdown(connection: Connection, cause: ShutdownSignalException): F[Unit] = fail(cause) - override def onShutdown(cause: ShutdownSignalException, channel: Channel): Unit = fail(cause) - } + override def onShutdown(cause: ShutdownSignalException, channel: Channel): F[Unit] = fail(cause) + } } diff --git a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/PoisonedMessageHandler.scala 
b/extras/src/main/scala/com/avast/clients/rabbitmq/extras/PoisonedMessageHandler.scala deleted file mode 100644 index ba2349dc..00000000 --- a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/PoisonedMessageHandler.scala +++ /dev/null @@ -1,87 +0,0 @@ -package com.avast.clients.rabbitmq.extras - -import cats.Applicative -import cats.effect.Sync -import cats.implicits._ -import com.avast.clients.rabbitmq.api.DeliveryResult.{Reject, Republish} -import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult} -import com.avast.clients.rabbitmq.extras.PoisonedMessageHandler.{defaultHandlePoisonedMessage, handleResult} -import com.typesafe.scalalogging.StrictLogging - -import scala.language.higherKinds -import scala.util.Try -import scala.util.control.NonFatal - -trait PoisonedMessageHandler[F[_], A] extends (Delivery[A] => F[DeliveryResult]) - -private[rabbitmq] class DefaultPoisonedMessageHandler[F[_]: Sync, A](maxAttempts: Int)(wrappedAction: Delivery[A] => F[DeliveryResult]) - extends PoisonedMessageHandler[F, A] - with StrictLogging { - - override def apply(delivery: Delivery[A]): F[DeliveryResult] = { - wrappedAction(delivery).flatMap(handleResult(delivery, maxAttempts, (d, _) => handlePoisonedMessage(d))) - } - - /** This method logs the delivery by default but can be overridden. The delivery is always REJECTed after this method execution. - */ - protected def handlePoisonedMessage(delivery: Delivery[A]): F[Unit] = defaultHandlePoisonedMessage(maxAttempts)(delivery) -} - -object PoisonedMessageHandler extends StrictLogging { - final val RepublishCountHeaderName: String = "X-Republish-Count" - - def apply[F[_]: Sync, A](maxAttempts: Int)(wrappedAction: Delivery[A] => F[DeliveryResult]): PoisonedMessageHandler[F, A] = { - new DefaultPoisonedMessageHandler[F, A](maxAttempts)(wrappedAction) - } - - /** - * @param customPoisonedAction The delivery is always REJECTed after this method execution. - */ - def withCustomPoisonedAction[F[_]: Sync, A](maxAttempts: Int)(wrappedAction: Delivery[A] => F[DeliveryResult])( - customPoisonedAction: Delivery[A] => F[Unit]): PoisonedMessageHandler[F, A] = { - new DefaultPoisonedMessageHandler[F, A](maxAttempts)(wrappedAction) { - override protected def handlePoisonedMessage(delivery: Delivery[A]): F[Unit] = customPoisonedAction(delivery) - } - } - - private[rabbitmq] def defaultHandlePoisonedMessage[F[_]: Sync, A](maxAttempts: Int)(delivery: Delivery[A]): F[Unit] = Sync[F].delay { - logger.warn(s"Message failures reached the limit $maxAttempts attempts, throwing away: $delivery") - } - - private[rabbitmq] def handleResult[F[_]: Sync, A]( - delivery: Delivery[A], - maxAttempts: Int, - handlePoisonedMessage: (Delivery[A], Int) => F[Unit])(r: DeliveryResult): F[DeliveryResult] = { - r match { - case Republish(newHeaders) => adjustDeliveryResult(delivery, maxAttempts, newHeaders, handlePoisonedMessage) - case r => Applicative[F].pure(r) // keep other results as they are - } - } - - private def adjustDeliveryResult[F[_]: Sync, A](delivery: Delivery[A], - maxAttempts: Int, - newHeaders: Map[String, AnyRef], - handlePoisonedMessage: (Delivery[A], Int) => F[Unit]): F[DeliveryResult] = { - // get current attempt no. 
from passed headers with fallback to original (incoming) headers - the fallback will most likely happen - // but we're giving the programmer chance to programmatically _pretend_ lower attempt number - val attempt = (delivery.properties.headers ++ newHeaders) - .get(RepublishCountHeaderName) - .flatMap(v => Try(v.toString.toInt).toOption) - .getOrElse(0) + 1 - - logger.debug(s"Attempt $attempt/$maxAttempts") - - if (attempt < maxAttempts) { - Applicative[F].pure(Republish(newHeaders + (RepublishCountHeaderName -> attempt.asInstanceOf[AnyRef]))) - } else { - handlePoisonedMessage(delivery, maxAttempts) - .recover { - case NonFatal(e) => - logger.warn("Custom poisoned message handler failed", e) - () - } - .map(_ => Reject) // always REJECT the message - } - } - -} diff --git a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandler.scala b/extras/src/main/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandler.scala deleted file mode 100644 index 9a8e86c2..00000000 --- a/extras/src/main/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandler.scala +++ /dev/null @@ -1,60 +0,0 @@ -package com.avast.clients.rabbitmq.extras - -import cats.effect.Effect -import com.avast.clients.rabbitmq.api._ -import com.avast.clients.rabbitmq.extras.PoisonedMessageHandler.defaultHandlePoisonedMessage -import fs2.Pipe - -import scala.language.higherKinds - -object StreamingPoisonedMessageHandler { - import cats.syntax.all._ - - def apply[F[_]: Effect, A](maxAttempts: Int)( - wrappedAction: Delivery[A] => F[DeliveryResult]): Pipe[F, StreamedDelivery[F, A], StreamedResult] = { - StreamingPoisonedMessageHandler { - PoisonedMessageHandler[F, A](maxAttempts)(wrappedAction) - } - } - - def withCustomPoisonedAction[F[_]: Effect, A](maxAttempts: Int)(wrappedAction: Delivery[A] => F[DeliveryResult])( - customPoisonedAction: Delivery[A] => F[Unit]): Pipe[F, StreamedDelivery[F, A], StreamedResult] = { - StreamingPoisonedMessageHandler { - PoisonedMessageHandler.withCustomPoisonedAction[F, A](maxAttempts)(wrappedAction)(customPoisonedAction) - } - } - - def piped[F[_]: Effect, A](maxAttempts: Int): Pipe[F, StreamedDelivery[F, A], StreamedDelivery[F, A]] = { - _.map(createStreamedDelivery(_, maxAttempts, defaultHandlePoisonedMessage[F, A](maxAttempts))) - } - - def pipedWithCustomPoisonedAction[F[_]: Effect, A](maxAttempts: Int)( - customPoisonedAction: Delivery[A] => F[Unit]): Pipe[F, StreamedDelivery[F, A], StreamedDelivery[F, A]] = { - _.map(createStreamedDelivery(_, maxAttempts, customPoisonedAction)) - } - - private def createStreamedDelivery[F[_]: Effect, A](d: StreamedDelivery[F, A], - maxAttempts: Int, - customPoisonedAction: Delivery[A] => F[Unit]): StreamedDelivery[F, A] = { - new StreamedDelivery[F, A] { - override def delivery: Delivery[A] = d.delivery - - override def handle(result: DeliveryResult): F[StreamedResult] = { - PoisonedMessageHandler.handleResult(d.delivery, maxAttempts, handlePoisonedMessage)(result).flatMap(d.handle) - } - - private def handlePoisonedMessage(delivery: Delivery[A], ma: Int): F[Unit] = customPoisonedAction(delivery) - } - } - - private def apply[F[_]: Effect, A](pmh: PoisonedMessageHandler[F, A]): Pipe[F, StreamedDelivery[F, A], StreamedResult] = { - _.evalMap { d => - for { - realResult <- pmh.apply(d.delivery) - streamedResult <- d.handle(realResult) - } yield { - streamedResult - } - } - } -} diff --git a/extras/src/test/resources/application.conf b/extras/src/test/resources/application.conf new file mode 100644 index 
00000000..270e2838
--- /dev/null
+++ b/extras/src/test/resources/application.conf
@@ -0,0 +1,258 @@
+myConfig {
+  hosts = [${rabbit.host}":"${rabbit.tcp.5672}]
+  virtualHost = "/"
+
+  name = "TestConnection"
+
+  credentials {
+    enabled = true
+
+    username = "guest"
+    password = "guest"
+  }
+
+  connectionTimeout = 5s
+
+  republishStrategy {
+    type = CustomExchange
+
+    exchangeName = "EXCHANGE5"
+
+    exchangeDeclare = true
+    exchangeAutoBind = true
+  }
+
+  consumers {
+    testing {
+      name = "Testing"
+
+      queueName = "QUEUE1"
+
+      processTimeout = 500 ms
+
+      declare {
+        enabled = true
+      }
+
+      bindings = [
+        {
+          routingKeys = ["test"]
+
+          exchange {
+            name = "EXCHANGE1"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }, {
+          routingKeys = ["test2"]
+
+          exchange {
+            name = "EXCHANGE2"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }
+      ]
+    }
+
+    testingPull {
+      name = "Testing"
+
+      queueName = "QUEUE1"
+
+      declare {
+        enabled = true
+      }
+
+      bindings = [
+        {
+          routingKeys = ["test"]
+
+          exchange {
+            name = "EXCHANGE1"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }, {
+          routingKeys = ["test2"]
+
+          exchange {
+            name = "EXCHANGE2"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }
+      ]
+    }
+
+    testingStreaming {
+      name = "Testing"
+
+      queueName = "QUEUE1"
+
+      declare {
+        enabled = true
+      }
+
+      prefetchCount = 500
+
+      bindings = [
+        {
+          routingKeys = ["test"]
+
+          exchange {
+            name = "EXCHANGE1"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }, {
+          routingKeys = ["test2"]
+
+          exchange {
+            name = "EXCHANGE2"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }
+      ]
+    }
+
+    testingStreamingWithTimeout {
+      name = "Testing"
+
+      queueName = "QUEUE1"
+
+      declare {
+        enabled = true
+      }
+
+      prefetchCount = 100
+      queueBufferSize = 2
+
+      processTimeout = 500 ms
+
+      bindings = [
+        {
+          routingKeys = ["test"]
+
+          exchange {
+            name = "EXCHANGE1"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }, {
+          routingKeys = ["test2"]
+
+          exchange {
+            name = "EXCHANGE2"
+
+            declare {
+              enabled = true
+
+              type = "direct"
+            }
+          }
+        }
+      ]
+    }
+  }
+
+  producers {
+    testing {
+      name = "Testing"
+
+      exchange = "EXCHANGE1"
+
+      declare {
+        enabled = true
+
+        type = "direct"
+      }
+    }
+
+    testing2 {
+      name = "Testing2"
+
+      exchange = "EXCHANGE2"
+
+      declare {
+        enabled = true
+
+        type = "direct"
+      }
+    }
+
+    testing3 {
+      name = "Testing3"
+
+      exchange = "EXCHANGE4"
+
+      declare {
+        enabled = true
+
+        type = "direct"
+      }
+    }
+  }
+
+  declarations {
+    foo {
+      declareExchange {
+        name = "EXCHANGE3"
+        type = "direct"
+      }
+    }
+
+    bindExchange1 {
+      sourceExchangeName = "EXCHANGE4"
+      routingKeys = ["test"]
+      destExchangeName = "EXCHANGE3"
+    }
+
+    bindExchange2 {
+      sourceExchangeName = "EXCHANGE4"
+      routingKeys = ["test"]
+      destExchangeName = "EXCHANGE1"
+    }
+
+    declareQueue {
+      name = "QUEUE2"
+
+      arguments = {"x-max-length": 10000}
+    }
+
+    bindQueue {
+      queueName = "QUEUE2"
+      routingKeys = ["test"]
+      exchangeName = "EXCHANGE3"
+    }
+  }
+}
diff --git a/extras/src/test/resources/logback.xml b/extras/src/test/resources/logback.xml
new file mode 100644
index 00000000..8cb1efb2
--- /dev/null
+++ b/extras/src/test/resources/logback.xml
@@ -0,0 +1,18 @@
+<configuration>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <target>System.out</target>
+
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [%thread] %-35logger{35}: %msg\(%file:%line\)%n%xThrowable{full}</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>
diff --git a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/DefaultPoisonedMessageHandlerTest.scala
b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/DefaultPoisonedMessageHandlerTest.scala deleted file mode 100644 index 244aafea..00000000 --- a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/DefaultPoisonedMessageHandlerTest.scala +++ /dev/null @@ -1,70 +0,0 @@ -package com.avast.clients.rabbitmq.extras - -import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.DeliveryResult.Republish -import com.avast.clients.rabbitmq.api.{Delivery, DeliveryResult, MessageProperties} -import com.avast.clients.rabbitmq.extras.PoisonedMessageHandler._ -import monix.eval.Task -import monix.execution.Scheduler.Implicits.global -import org.scalatest.concurrent.ScalaFutures - -class DefaultPoisonedMessageHandlerTest extends TestBase with ScalaFutures { - - test("basic") { - - def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = { - Task.now(Republish()) - } - - val handler = PoisonedMessageHandler[Task, Bytes](5)(readAction) - - def run(properties: MessageProperties): DeliveryResult = { - handler(Delivery(Bytes.empty(), properties, "")).runSyncUnsafe() - } - - val properties = (1 to 4).foldLeft(MessageProperties.empty) { - case (p, _) => - run(p) match { - case Republish(h) => MessageProperties(headers = h) - case _ => MessageProperties.empty - } - } - - // check it increases the header with count - assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 4.asInstanceOf[AnyRef])))(properties) - - // check it will Ack the message on 5th attempt - assertResult(DeliveryResult.Reject)(run(properties)) - - } - - test("pretend lower no. of attempts") { - - def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = { - Task.now(Republish()) - } - - val handler = PoisonedMessageHandler[Task, Bytes](5)(readAction) - - def run(properties: MessageProperties): DeliveryResult = { - handler(Delivery(Bytes.empty(), properties, "")).runSyncUnsafe() - } - - val properties = (1 to 4).foldLeft(MessageProperties.empty) { - case (p, i) => - run(p) match { - case Republish(h) => - if (i == 3) { - MessageProperties(headers = h + (RepublishCountHeaderName -> 1.asInstanceOf[AnyRef])) - } else { - MessageProperties(headers = h) - } - case _ => MessageProperties.empty - } - } - - // attempts no. 
will be only 2 because programmer said that ;-) - assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 2.asInstanceOf[AnyRef])))(properties) - - } -} diff --git a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandlerTest.scala b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandlerTest.scala deleted file mode 100644 index aff8d91b..00000000 --- a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/StreamingPoisonedMessageHandlerTest.scala +++ /dev/null @@ -1,88 +0,0 @@ -package com.avast.clients.rabbitmq.extras - -import com.avast.bytes.Bytes -import com.avast.clients.rabbitmq.api.DeliveryResult.Republish -import com.avast.clients.rabbitmq.api._ -import com.avast.clients.rabbitmq.extras.PoisonedMessageHandler._ -import monix.eval.Task -import monix.execution.Scheduler.Implicits.global -import org.scalatest.concurrent.ScalaFutures - -class StreamingPoisonedMessageHandlerTest extends TestBase with ScalaFutures { - - private def streamedDelivery[A](body: A, properties: MessageProperties) = new StreamedDelivery[Task, A] { - override val delivery: Delivery[A] = Delivery(body, properties, "") - - override def handle(result: DeliveryResult): Task[StreamedResult] = Task.now { - this.result = Some(result) - StreamedResult - } - - //noinspection ScalaStyle - var result: Option[DeliveryResult] = None - } - - test("basic") { - - def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = { - Task.now(Republish()) - } - - val handler = StreamingPoisonedMessageHandler[Task, Bytes](5)(readAction) - - def run(properties: MessageProperties): DeliveryResult = { - //noinspection ScalaStyle - val delivery: StreamedDelivery[Task, Bytes] { var result: Option[DeliveryResult] } = streamedDelivery(Bytes.empty(), properties) - - handler(fs2.Stream.eval(Task.now(delivery))).compile.last.runSyncUnsafe().getOrElse(fail()) - delivery.result.getOrElse(fail()) - } - - val properties = (1 to 4).foldLeft(MessageProperties.empty) { - case (p, _) => - run(p) match { - case Republish(h) => MessageProperties(headers = h) - case _ => MessageProperties.empty - } - } - - // check it increases the header with count - assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 4.asInstanceOf[AnyRef])))(properties) - - // check it will Ack the message on 5th attempt - assertResult(DeliveryResult.Reject)(run(properties)) - } - - test("pretend lower no. of attempts") { - - def readAction(d: Delivery[Bytes]): Task[DeliveryResult] = { - Task.now(Republish()) - } - - val handler = StreamingPoisonedMessageHandler[Task, Bytes](5)(readAction) - - def run(properties: MessageProperties): DeliveryResult = { - //noinspection ScalaStyle - val delivery: StreamedDelivery[Task, Bytes] { var result: Option[DeliveryResult] } = streamedDelivery(Bytes.empty(), properties) - - handler(fs2.Stream.eval(Task.now(delivery))).compile.last.runSyncUnsafe().getOrElse(fail()) - delivery.result.getOrElse(fail()) - } - - val properties = (1 to 4).foldLeft(MessageProperties.empty) { - case (p, i) => - run(p) match { - case Republish(h) => - if (i == 3) { - MessageProperties(headers = h + (RepublishCountHeaderName -> 1.asInstanceOf[AnyRef])) - } else { - MessageProperties(headers = h) - } - case _ => MessageProperties.empty - } - } - - // attempts no. 
will be only 2, because the test pretended a lower number of attempts ;-) - assertResult(MessageProperties(headers = Map(RepublishCountHeaderName -> 2.asInstanceOf[AnyRef])))(properties) - } -} diff --git a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestBase.scala b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestBase.scala index b5b25044..739d80e6 100644 --- a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestBase.scala +++ b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestBase.scala @@ -1,23 +1,55 @@ package com.avast.clients.rabbitmq.extras +import cats.effect.{Blocker, IO, Resource} import com.typesafe.scalalogging.StrictLogging import monix.eval.Task +import monix.execution.Scheduler import monix.execution.Scheduler.Implicits.global +import monix.execution.schedulers.CanBlock import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.concurrent.Eventually import org.scalatestplus.junit.JUnitRunner import org.scalatestplus.mockito.MockitoSugar -import scala.concurrent.duration.Duration +import java.util.concurrent.Executors +import scala.concurrent.TimeoutException +import scala.concurrent.duration._ import scala.language.implicitConversions @RunWith(classOf[JUnitRunner]) class TestBase extends FunSuite with MockitoSugar with Eventually with StrictLogging { protected implicit def taskToOps[A](t: Task[A]): TaskOps[A] = new TaskOps[A](t) + protected implicit def resourceToIOOps[A](t: Resource[IO, A]): ResourceIOOps[A] = new ResourceIOOps[A](t) + protected implicit def resourceToTaskOps[A](t: Resource[Task, A]): ResourceTaskOps[A] = new ResourceTaskOps[A](t) +} + +object TestBase { + val testBlockingScheduler: Scheduler = Scheduler.io(name = "test-blocking") + val testBlocker: Blocker = Blocker.liftExecutorService(Executors.newCachedThreadPool()) } class TaskOps[A](t: Task[A]) { def await(duration: Duration): A = t.runSyncUnsafe(duration) - def await: A = await(Duration.Inf) + def await: A = await(10.seconds) +} + +class ResourceIOOps[A](val r: Resource[IO, A]) extends AnyVal { + def withResource[B](f: A => B): B = { + withResource(f, Duration.Inf) + } + + def withResource[B](f: A => B, timeout: Duration): B = { + r.use(a => IO(f(a))).unsafeRunTimed(timeout).getOrElse(throw new TimeoutException("Timeout has occurred")) + } +} + +class ResourceTaskOps[A](val r: Resource[Task, A]) extends AnyVal { + def withResource[B](f: A => B): B = { + withResource(f, Duration.Inf) + } + + def withResource[B](f: A => B, timeout: Duration): B = { + r.use(a => Task.delay(f(a))).runSyncUnsafe(timeout)(TestBase.testBlockingScheduler, CanBlock.permit) + } } diff --git a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestHelper.scala b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestHelper.scala new file mode 100644 index 00000000..6889851a --- /dev/null +++ b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestHelper.scala @@ -0,0 +1,120 @@ +package com.avast.clients.rabbitmq.extras + +import io.circe.Decoder +import io.circe.generic.auto._ +import io.circe.parser._ +import scalaj.http.Http + +import java.net.URLEncoder +import java.nio.charset.StandardCharsets +import scala.util.Success + +//noinspection ScalaStyle +class TestHelper(host: String, port: Int) { + + final val RootUri = s"http://$host:$port/api" + + object queue { + + def getMessagesCount(queueName: String): Int = { + val encoded = URLEncoder.encode(queueName, StandardCharsets.UTF_8.toString) + + val resp = Http(s"$RootUri/queues/%2f/$encoded").auth("guest",
"guest").asString.body + + println("MESSAGES COUNT:") + println(resp) + + decode[QueueProperties](resp) match { + case Right(p) => p.messages + case r => throw new IllegalStateException(s"Wrong response $r") + } + } + + def getPublishedCount(queueName: String): Int = { + val encoded = URLEncoder.encode(queueName, StandardCharsets.UTF_8.toString) + + val resp = Http(s"$RootUri/queues/%2f/$encoded").auth("guest", "guest").asString.body + + println("PUBLISHED COUNT:") + println(resp) + + decode[QueueProperties](resp) match { + case Right(p) => + p.message_stats.map(_.publish).getOrElse { + Console.err.println(s"Could not extract published_count for $queueName!") + 0 + } + case r => throw new IllegalStateException(s"Wrong response $r") + } + } + + def getArguments(queueName: String): Map[String, Any] = { + val encoded = URLEncoder.encode(queueName, StandardCharsets.UTF_8.toString) + + val resp = Http(s"$RootUri/queues/%2f/$encoded").auth("guest", "guest").asString.body + + decode[QueueProperties](resp) match { + case Right(p) => + p.arguments.getOrElse { + Console.err.println(s"Could not extract arguments for $queueName!") + Map.empty + } + case r => throw new IllegalStateException(s"Wrong response $r") + } + } + + def delete(queueName: String, ifEmpty: Boolean = false, ifUnused: Boolean = false): Unit = { + println(s"Deleting queue: $queueName") + val encoded = URLEncoder.encode(queueName, StandardCharsets.UTF_8.toString) + + val resp = + Http(s"$RootUri/queues/%2f/$encoded?if-empty=$ifEmpty&if-unused=$ifUnused").method("DELETE").auth("guest", "guest").asString + + val content = resp.body + + val message = s"Delete queue response ${resp.statusLine}: '$content'" + println(message) + + if (!resp.isSuccess && resp.code != 404) { + throw new IllegalStateException(message) + } + } + + private implicit val anyDecoder: Decoder[Any] = Decoder.decodeJson.emapTry { json => + // we are in test, it's enough to support just Int and String here + if (json.isNumber) { + json.as[Int].toTry + } else if (json.isString) { + json.as[String].toTry + } else Success(null) + } + + private case class QueueProperties(messages: Int, message_stats: Option[MessagesStats], arguments: Option[Map[String, Any]]) + private case class MessagesStats(publish: Int, ack: Option[Int]) + } + + object exchange { + + def getPublishedCount(exchangeName: String): Int = { + val encoded = URLEncoder.encode(exchangeName, StandardCharsets.UTF_8.toString) + + val resp = Http(s"$RootUri/exchanges/%2f/$encoded").auth("guest", "guest").asString.body + + println("PUBLISHED COUNT:") + println(resp) + + decode[ExchangeProperties](resp) match { + case Right(p) => + p.message_stats.map(_.publish_in).getOrElse { + Console.err.println(s"Could not extract published_count for $exchangeName!") + 0 + } + case r => throw new IllegalStateException(s"Wrong response $r") + } + } + + private case class ExchangeProperties(message_stats: Option[MessagesStats]) + private case class MessagesStats(publish_in: Int, publish_out: Int) + } + +} diff --git a/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestMonitor.scala b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestMonitor.scala new file mode 100644 index 00000000..975639fc --- /dev/null +++ b/extras/src/test/scala/com/avast/clients/rabbitmq/extras/TestMonitor.scala @@ -0,0 +1,39 @@ +package com.avast.clients.rabbitmq.extras + +import cats.effect.Sync +import com.avast.metrics.api.Naming +import com.avast.metrics.dropwizard.MetricsMonitor +import com.avast.metrics.scalaapi +import 
com.avast.metrics.scalaeffectapi._ +import com.codahale.metrics.MetricRegistry + +import scala.jdk.CollectionConverters._ + +class TestMonitor[F[_]: Sync] extends Monitor[F] { + + private val metricsRegistry = new MetricRegistry + val underlying: Monitor[F] = Monitor.wrapJava[F](new MetricsMonitor(metricsRegistry, Naming.defaultNaming())) + + override def named(name: String): Monitor[F] = underlying.named(name) + override def named(name1: String, name2: String, restOfNames: String*): Monitor[F] = underlying.named(name1, name2, restOfNames: _*) + override def meter(name: String): Meter[F] = underlying.meter(name) + override def counter(name: String): Counter[F] = underlying.counter(name) + override def histogram(name: String): Histogram[F] = underlying.histogram(name) + override def timer(name: String): Timer[F] = underlying.timer(name) + override def timerPair(name: String): TimerPair[F] = underlying.timerPair(name) + override def gauge: GaugeFactory[F] = underlying.gauge + override def asPlainScala: scalaapi.Monitor = underlying.asPlainScala + override def asJava: com.avast.metrics.api.Monitor = underlying.asJava + override def getName: String = underlying.getName + + override def close(): Unit = underlying.close() + + val registry: Registry = new Registry(metricsRegistry) +} + +class Registry(registry: MetricRegistry) { + def meterCount(path: String): Long = registry.getMeters.asScala(path.replace('.', '/')).getCount + def timerCount(path: String): Long = registry.getTimers.asScala(path.replace('.', '/')).getCount + def timerPairCountSuccesses(path: String): Long = registry.getTimers.asScala(path.replace('.', '/') + "Successes").getCount + def timerPairCountFailures(path: String): Long = registry.getTimers.asScala(path.replace('.', '/') + "Failures").getCount +} diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index b1159fc5..d7e66b5c 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,5 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.4-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/pureconfig/README.md b/pureconfig/README.md index ad8f7051..96691e4c 100644 --- a/pureconfig/README.md +++ b/pureconfig/README.md @@ -183,7 +183,7 @@ import cats.effect.Resource import com.avast.bytes.Bytes import com.avast.clients.rabbitmq._ import com.avast.clients.rabbitmq.api._ -import com.avast.metrics.scalaapi.Monitor +import com.avast.metrics.scalaeffectapi.Monitor import com.typesafe.config.ConfigFactory import monix.eval._ import monix.execution.Scheduler @@ -193,7 +193,7 @@ import com.avast.clients.rabbitmq.pureconfig._ // <-- this is needed in order to val config = ConfigFactory.load().getConfig("myConfig") implicit val sch: Scheduler = ??? // required by Task -val monitor: Monitor = ??? +val monitor: Monitor[Task] = ??? val blockingExecutor: ExecutorService = ??? 
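For illustration, a `Monitor[Task]` for the README snippet above can be obtained the same way the `TestMonitor` added in this patch does it, by wrapping a plain Java monitor into the effectful API. This is only a sketch; the `metricsRegistry` value is illustrative and any `MetricRegistry` instance works:

```scala
import com.avast.metrics.api.Naming
import com.avast.metrics.dropwizard.MetricsMonitor
import com.avast.metrics.scalaeffectapi.Monitor
import com.codahale.metrics.MetricRegistry
import monix.eval.Task

// Wrap a Dropwizard-backed Java monitor into Monitor[F], mirroring what
// TestMonitor in this patch does. Monitor.wrapJava needs a Sync instance
// for the effect type, which monix Task provides out of the box.
val metricsRegistry = new MetricRegistry
val monitor: Monitor[Task] =
  Monitor.wrapJava[Task](new MetricsMonitor(metricsRegistry, Naming.defaultNaming()))
```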
diff --git a/pureconfig/build.gradle b/pureconfig/build.gradle index 864c93c8..b6012ea5 100644 --- a/pureconfig/build.gradle +++ b/pureconfig/build.gradle @@ -3,6 +3,6 @@ archivesBaseName = "rabbitmq-client-pureconfig_$scalaVersion" dependencies { api project(":core") - api 'com.typesafe:config:1.4.2' - api "com.github.pureconfig:pureconfig_${scalaVersion}:0.15.0" + api "com.typesafe:config:$typesafeConfigVersion" + api "com.github.pureconfig:pureconfig_${scalaVersion}:$pureconfigVersion" } diff --git a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/ConfigRabbitMQConnection.scala b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/ConfigRabbitMQConnection.scala index 3a67cb37..b2abc6ab 100644 --- a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/ConfigRabbitMQConnection.scala +++ b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/ConfigRabbitMQConnection.scala @@ -22,9 +22,8 @@ import com.avast.clients.rabbitmq.{ ServerChannel, StreamingConsumerConfig } -import com.avast.metrics.scalaapi.Monitor +import com.avast.metrics.scalaeffectapi.Monitor -import scala.language.higherKinds import scala.reflect.ClassTag trait ConfigRabbitMQConnection[F[_]] { @@ -34,32 +33,32 @@ trait ConfigRabbitMQConnection[F[_]] { /** Creates new instance of consumer, using the TypeSafe configuration passed to the factory and consumer name. * * @param configName Name of configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. * @param readAction Action executed for each delivered message. You should never return a failed F. */ - def newConsumer[A: DeliveryConverter](configName: String, monitor: Monitor)( + def newConsumer[A: DeliveryConverter](configName: String, monitor: Monitor[F])( readAction: DeliveryReadAction[F, A]): Resource[F, RabbitMQConsumer[F]] /** Creates new instance of producer, using the TypeSafe configuration passed to the factory and producer name. * * @param configName Name of configuration of the producer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. */ - def newProducer[A: ProductConverter](configName: String, monitor: Monitor): Resource[F, RabbitMQProducer[F, A]] + def newProducer[A: ProductConverter](configName: String, monitor: Monitor[F]): Resource[F, RabbitMQProducer[F, A]] /** Creates new instance of pull consumer, using the TypeSafe configuration passed to the factory and consumer name. * * @param configName Name of configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. */ - def newPullConsumer[A: DeliveryConverter](configName: String, monitor: Monitor): Resource[F, RabbitMQPullConsumer[F, A]] + def newPullConsumer[A: DeliveryConverter](configName: String, monitor: Monitor[F]): Resource[F, RabbitMQPullConsumer[F, A]] /** Creates new instance of streaming consumer, using the TypeSafe configuration passed to the factory and consumer name. * * @param configName Name of configuration of the consumer. - * @param monitor Monitor for metrics. + * @param monitor Monitor[F] for metrics. */ - def newStreamingConsumer[A: DeliveryConverter](configName: String, monitor: Monitor): Resource[F, RabbitMQStreamingConsumer[F, A]] + def newStreamingConsumer[A: DeliveryConverter](configName: String, monitor: Monitor[F]): Resource[F, RabbitMQStreamingConsumer[F, A]] /** * Declares an additional exchange, using the TypeSafe configuration passed to the factory and config name.
@@ -90,9 +89,9 @@ trait ConfigRabbitMQConnection[F[_]] { */ def withChannel[A](f: ServerChannel => F[A]): F[A] - def connectionListener: ConnectionListener - def channelListener: ChannelListener - def consumerListener: ConsumerListener + def connectionListener: ConnectionListener[F] + def channelListener: ChannelListener[F] + def consumerListener: ConsumerListener[F] } class DefaultConfigRabbitMQConnection[F[_]](config: ConfigCursor, wrapped: RabbitMQConnection[F])( @@ -112,28 +111,28 @@ class DefaultConfigRabbitMQConnection[F[_]](config: ConfigCursor, wrapped: Rabbi override def withChannel[A](f: ServerChannel => F[A]): F[A] = wrapped.withChannel(f) - override val connectionListener: ConnectionListener = wrapped.connectionListener + override val connectionListener: ConnectionListener[F] = wrapped.connectionListener - override val channelListener: ChannelListener = wrapped.channelListener + override val channelListener: ChannelListener[F] = wrapped.channelListener - override val consumerListener: ConsumerListener = wrapped.consumerListener + override val consumerListener: ConsumerListener[F] = wrapped.consumerListener - override def newConsumer[A: DeliveryConverter](configName: String, monitor: Monitor)( + override def newConsumer[A: DeliveryConverter](configName: String, monitor: Monitor[F])( readAction: DeliveryReadAction[F, A]): Resource[F, RabbitMQConsumer[F]] = { - Resource.liftF(loadConfig[ConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newConsumer(_, monitor)(readAction)) + Resource.eval(loadConfig[ConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newConsumer(_, monitor)(readAction)) } - override def newProducer[A: ProductConverter](configName: String, monitor: Monitor): Resource[F, RabbitMQProducer[F, A]] = { - Resource.liftF(loadConfig[ProducerConfig](ProducersRootName, configName)) >>= (wrapped.newProducer(_, monitor)) + override def newProducer[A: ProductConverter](configName: String, monitor: Monitor[F]): Resource[F, RabbitMQProducer[F, A]] = { + Resource.eval(loadConfig[ProducerConfig](ProducersRootName, configName)) >>= (wrapped.newProducer(_, monitor)) } - override def newPullConsumer[A: DeliveryConverter](configName: String, monitor: Monitor): Resource[F, RabbitMQPullConsumer[F, A]] = { - Resource.liftF(loadConfig[PullConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newPullConsumer(_, monitor)) + override def newPullConsumer[A: DeliveryConverter](configName: String, monitor: Monitor[F]): Resource[F, RabbitMQPullConsumer[F, A]] = { + Resource.eval(loadConfig[PullConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newPullConsumer(_, monitor)) } override def newStreamingConsumer[A: DeliveryConverter](configName: String, - monitor: Monitor): Resource[F, RabbitMQStreamingConsumer[F, A]] = { - Resource.liftF(loadConfig[StreamingConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newStreamingConsumer(_, monitor)) + monitor: Monitor[F]): Resource[F, RabbitMQStreamingConsumer[F, A]] = { + Resource.eval(loadConfig[StreamingConsumerConfig](ConsumersRootName, configName)) >>= (wrapped.newStreamingConsumer(_, monitor)) } override def declareExchange(configName: String): F[Unit] = { diff --git a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/PureconfigImplicits.scala b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/PureconfigImplicits.scala index cf7ccbd4..c5bfff61 100644 --- a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/PureconfigImplicits.scala +++ 
b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/PureconfigImplicits.scala @@ -103,6 +103,11 @@ class PureconfigImplicits(implicit namingConvention: NamingConvention = CamelCas case unknownName => Failure(new IllegalArgumentException(s"Unknown addressResolverType: $unknownName")) } + implicit val deadQueueProducerConfigReader: ConfigReader[DeadQueueProducerConfig] = deriveReader + implicit val poisonedMessageHandlingConfigReader: ConfigReader[PoisonedMessageHandlingConfig] = PoisonedMessageHandlingConfigReader + implicit val loggingPoisonedMessageHandlingConfigReader: ConfigReader[LoggingPoisonedMessageHandling] = deriveReader + implicit val deadQueuePoisonedMessageHandlingConfigReader: ConfigReader[DeadQueuePoisonedMessageHandling] = deriveReader + implicit val deliveryResultReader: ConfigReader[DeliveryResult] = ConfigReader.stringConfigReader.map { _.toLowerCase match { case "ack" => Ack @@ -204,6 +209,22 @@ class PureconfigImplicits(implicit namingConvention: NamingConvention = CamelCas } } } + + private object PoisonedMessageHandlingConfigReader extends ConfigReader[PoisonedMessageHandlingConfig] { + override def from(cur: ConfigCursor): Result[PoisonedMessageHandlingConfig] = { + withType(cur) { (config, `type`) => + `type`.toLowerCase match { + case "noop" => Right(NoOpPoisonedMessageHandling) + case "logging" => ConfigReader[LoggingPoisonedMessageHandling].from(config.root()) + case "deadqueue" => ConfigReader[DeadQueuePoisonedMessageHandling].from(config.root()) + case t => + cur.fluent.at("type").cursor.flatMap { cursor => // resolve the `type` cursor so the failure points at the correct config location + Left(ConfigReaderFailures(CannotParse(s"Unknown poisoned message handler type: $t", cursor.origin))) + } + } + } + } + } } object PureconfigImplicits { diff --git a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/pureconfig.scala b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/pureconfig.scala index 6372e5ba..51ee4977 100644 --- a/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/pureconfig.scala +++ b/pureconfig/src/main/scala/com/avast/clients/rabbitmq/pureconfig/pureconfig.scala @@ -1,15 +1,13 @@ package com.avast.clients.rabbitmq -import java.util.concurrent.ExecutorService - import _root_.pureconfig._ import _root_.pureconfig.error.ConfigReaderException import cats.effect.{ConcurrentEffect, ContextShift, Resource, Sync, Timer} -import com.avast.clients.rabbitmq.RabbitMQConnection.DefaultListeners import com.typesafe.config.Config -import javax.net.ssl.SSLContext -import scala.language.{higherKinds, implicitConversions} +import java.util.concurrent.ExecutorService +import javax.net.ssl.SSLContext +import scala.language.implicitConversions package object pureconfig { @@ -18,13 +16,12 @@ package object pureconfig { private[pureconfig] val DeclarationsRootName = "declarations" implicit class RabbitMQConnectionOps(val f: RabbitMQConnection.type) extends AnyVal { - def fromConfig[F[_]: ConcurrentEffect: Timer: ContextShift]( - config: Config, - blockingExecutor: ExecutorService, - sslContext: Option[SSLContext] = None, - connectionListener: ConnectionListener = DefaultListeners.DefaultConnectionListener, - channelListener: ChannelListener = DefaultListeners.DefaultChannelListener, - consumerListener: ConsumerListener = DefaultListeners.DefaultConsumerListener)( + def fromConfig[F[_]: ConcurrentEffect: Timer: ContextShift](config: Config, + blockingExecutor: ExecutorService, + sslContext: Option[SSLContext] = None, + connectionListener: Option[ConnectionListener[F]]
= None, + channelListener: Option[ChannelListener[F]] = None, + consumerListener: Option[ConsumerListener[F]] = None)( implicit connectionConfigReader: ConfigReader[RabbitMQConnectionConfig] = implicits.CamelCase.connectionConfigReader, consumerConfigReader: ConfigReader[ConsumerConfig] = implicits.CamelCase.consumerConfigReader, producerConfigReader: ConfigReader[ProducerConfig] = implicits.CamelCase.producerConfigReader, @@ -38,7 +35,7 @@ package object pureconfig { val configSource = ConfigSource.fromConfig(config) for { - connectionConfig <- Resource.liftF(Sync[F].delay { configSource.loadOrThrow[RabbitMQConnectionConfig] }) + connectionConfig <- Resource.eval(Sync[F].delay { configSource.loadOrThrow[RabbitMQConnectionConfig] }) connection <- RabbitMQConnection.make(connectionConfig, blockingExecutor, sslContext, diff --git a/settings.gradle b/settings.gradle index c98c2363..036cd63c 100644 --- a/settings.gradle +++ b/settings.gradle @@ -5,6 +5,5 @@ include 'core' include 'pureconfig' include 'extras' include 'extras-circe' -include 'extras-cactus' include 'extras-protobuf' include 'extras-scalapb' From ab68694752ba297224bcbe1207df03c8a5171fca Mon Sep 17 00:00:00 2001 From: Jenda Kolena Date: Wed, 23 Feb 2022 17:56:00 +0100 Subject: [PATCH 2/3] Use rabbitmq:3.9 Docker image in tests For some unknown reason, longer timeouts are needed in the tests when calling the REST API... --- .../avast/clients/rabbitmq/BasicLiveTest.scala | 16 ++++++++++------ docker-compose.override.yml | 10 ++++++---- docker-compose.yml | 6 ++++-- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala index 591d1c86..d1ef399e 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala @@ -179,7 +179,9 @@ class BasicLiveTest extends TestBase with ScalaFutures { } assertResult(true, latch.getCount)(latch.await(1000, TimeUnit.MILLISECONDS)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + eventually { + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + } } } } @@ -207,7 +209,7 @@ class BasicLiveTest extends TestBase with ScalaFutures { sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } - eventually(timeout(Span(3, Seconds)), interval(Span(0.5, Seconds))) { + eventually(timeout(Span(5, Seconds)), interval(Span(0.5, Seconds))) { assert(cnt.get() >= 40) assert(testHelper.queue.getMessagesCount(queueName1) <= 20) } @@ -280,16 +282,18 @@ class BasicLiveTest extends TestBase with ScalaFutures { _ <- rabbitConnection.bindQueue("bindQueue") } yield ()).unsafeRunSync() - assertResult(Map("x-max-length" -> 1000000))(testHelper.queue.getArguments(queueName2)) + eventually { + assertResult(Map("x-max-length" -> 1000000))(testHelper.queue.getArguments(queueName2)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) - assertResult(0)(testHelper.queue.getMessagesCount(queueName2)) + assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) + assertResult(0)(testHelper.queue.getMessagesCount(queueName2)) + } for (_ <- 1 to 10) { sender.send("test", Bytes.copyFromUtf8(Random.nextString(10))).await } - eventually(timeout(Span(2, Seconds)), interval(Span(200, Milliseconds))) { + eventually(timeout(Span(5, Seconds)), interval(Span(200, Milliseconds))) { assertResult(true)(latch.await(500, TimeUnit.MILLISECONDS))
assertResult(0)(testHelper.queue.getMessagesCount(queueName1)) diff --git a/docker-compose.override.yml b/docker-compose.override.yml index 1c817fb9..2bcf4b84 100644 --- a/docker-compose.override.yml +++ b/docker-compose.override.yml @@ -1,4 +1,6 @@ -rabbit: - ports: - - "5672:5672" - - "15672:15672" +version: "3" +services: + rabbit: + ports: + - "5672:5672" + - "15672:15672" diff --git a/docker-compose.yml b/docker-compose.yml index 118cf9e7..a6f8637c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,2 +1,4 @@ -rabbit: - image: rabbitmq:3.6.1-management \ No newline at end of file +version: "3" +services: + rabbit: + image: rabbitmq:3.9.13-management From bb5788c020101141549b932b826b0c941185b8f4 Mon Sep 17 00:00:00 2001 From: Jan Kolena Date: Sat, 8 Jan 2022 11:55:32 +0100 Subject: [PATCH 3/3] Limit number of channels (taking over #57/#58) --- .../rabbitmq/DefaultRabbitMQConnection.scala | 4 ++- .../clients/rabbitmq/RabbitMQConnection.scala | 1 + .../clients/rabbitmq/configuration.scala | 1 + core/src/test/resources/application.conf | 1 + .../clients/rabbitmq/BasicLiveTest.scala | 34 ++++++++++++++++++- pureconfig/README.md | 1 + 6 files changed, 40 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala index 548d54df..71feb1f1 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/DefaultRabbitMQConnection.scala @@ -37,8 +37,10 @@ class DefaultRabbitMQConnection[F[_]] private (connection: ServerConnection, channelListener.onCreate(channel).unsafeStartAndForget() channel + case null => throw new IllegalStateException(s"New channel could not be created, maybe the max. 
count limit was reached?") + // since the connection is `Recoverable`, the channel should always be `Recoverable` too (based on docs), so the exception will never be thrown - case _ => throw new IllegalStateException(s"Required Recoverable Channel") + case n => throw new IllegalStateException(s"Required Recoverable Channel, got: $n") } } catch { case NonFatal(e) => diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala b/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala index 8d6d4537..f4baaefa 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/RabbitMQConnection.scala @@ -198,6 +198,7 @@ object RabbitMQConnection { factory.setAutomaticRecoveryEnabled(networkRecovery.enabled) factory.setExceptionHandler(exceptionHandler) factory.setRequestedHeartbeat(heartBeatInterval.toSeconds.toInt) + factory.setRequestedChannelMax(channelMax) if (networkRecovery.enabled) factory.setRecoveryDelayHandler(networkRecovery.handler) diff --git a/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala b/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala index eab68980..907ed698 100644 --- a/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala +++ b/core/src/main/scala/com/avast/clients/rabbitmq/configuration.scala @@ -17,6 +17,7 @@ final case class RabbitMQConnectionConfig(name: String, heartBeatInterval: FiniteDuration = 30.seconds, topologyRecovery: Boolean = true, networkRecovery: NetworkRecoveryConfig = NetworkRecoveryConfig(), + channelMax: Int = 2047, credentials: CredentialsConfig, republishStrategy: RepublishStrategyConfig = RepublishStrategyConfig.DefaultExchange) diff --git a/core/src/test/resources/application.conf b/core/src/test/resources/application.conf index 1897c310..279ac8e1 100644 --- a/core/src/test/resources/application.conf +++ b/core/src/test/resources/application.conf @@ -12,6 +12,7 @@ myConfig { } connectionTimeout = 5s + channelMax = 20 republishStrategy { type = CustomExchange diff --git a/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala index d1ef399e..f98e5331 100644 --- a/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala +++ b/core/src/test/scala/com/avast/clients/rabbitmq/BasicLiveTest.scala @@ -1,6 +1,6 @@ package com.avast.clients.rabbitmq -import cats.effect.{ContextShift, IO, Timer} +import cats.effect.{ContextShift, IO, Resource, Timer} import com.avast.bytes.Bytes import com.avast.clients.rabbitmq.api.DeliveryResult._ import com.avast.clients.rabbitmq.api._ @@ -465,4 +465,36 @@ class BasicLiveTest extends TestBase with ScalaFutures { } } } + + test("limits number of channels") { + val c = createConfig() + import c._ + + RabbitMQConnection.fromConfig[Task](config, ex).withResource { rabbitConnection => + def createChannels(count: Int): Resource[Task, List[ServerChannel]] = + (1 to count) + .map(_ => rabbitConnection.newChannel()) + .foldLeft(Resource.pure[Task, List[ServerChannel]](List.empty)) { + case (prev, ch) => + ch.flatMap { nch => + prev.map { nch +: _ } + } + } + + // max is set to 20 (see application.conf above), so 19 channels can still be created... + createChannels(19).withResource { channels => + assertResult(19)(channels.size) + } + + try { + createChannels(21).withResource { _ => + fail("should have failed") + } + fail("should have failed") + } catch { + case e: IllegalStateException => + assertResult("New channel could not be created, maybe the max.
count limit was reached?")(e.getMessage) + } + } + } } diff --git a/pureconfig/README.md b/pureconfig/README.md index 96691e4c..69de3da8 100644 --- a/pureconfig/README.md +++ b/pureconfig/README.md @@ -82,6 +82,7 @@ myConfig { } connectionTimeout = 5s + channelMax = 200 // default 2047 networkRecovery { enabled = true // enabled by default
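As the `RabbitMQConnection.scala` hunk above shows, the new `channelMax` option is handed straight to the underlying Java AMQP client. A minimal sketch of that low-level call (the value 200 is just the README's example, not a recommendation):

```scala
import com.rabbitmq.client.ConnectionFactory

// channelMax is negotiated with the broker when the connection is opened;
// 2047 matches the Java client's own default, which is why the config default
// above uses the same number. Once the negotiated limit is exhausted,
// createChannel() returns null - the situation the new `case null` branch in
// DefaultRabbitMQConnection turns into the IllegalStateException asserted by
// the "limits number of channels" test.
val factory = new ConnectionFactory()
factory.setRequestedChannelMax(200)
```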