Skip to content

Commit

Permalink
Merge pull request #13 from d3rky/feature/LITE-17357-shared-rabbit-connection-on-produce
Browse files Browse the repository at this point in the history

LITE-17357 Add shared connection to RabbitMQ transport for producer
  • Loading branch information
d3rky authored Mar 1, 2021
2 parents 10679d5 + 2fdd3dc commit 1b73302
Show file tree
Hide file tree
Showing 8 changed files with 67 additions and 28 deletions.
5 changes: 5 additions & 0 deletions dj_cqrs/transport/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,8 @@ def produce(payload):
def consume(*args, **kwargs):
"""Receive data from master model."""
raise NotImplementedError

@staticmethod
def clean_connection(*args, **kwargs):
    """Release any connections the transport is holding open.

    Concrete transports override this to close cached/shared
    connections (if they keep any); the base class provides no
    implementation.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError
5 changes: 5 additions & 0 deletions dj_cqrs/transport/kombu.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,11 @@ def get_consumers(self, Consumer, channel):
class KombuTransport(LoggingMixin, BaseTransport):
CONSUMER_RETRY_TIMEOUT = 5

@classmethod
def clean_connection(cls):
    """No-op: this transport keeps no cached connection, so there is nothing to release."""

@classmethod
def consume(cls):
queue_name, prefetch_count = cls._get_consumer_settings()
Expand Down
52 changes: 33 additions & 19 deletions dj_cqrs/transport/rabbit_mq.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from socket import gaierror
from urllib.parse import unquote, urlparse


import ujson
from django.conf import settings
from pika import exceptions, BasicProperties, BlockingConnection, ConnectionParameters, credentials
Expand All @@ -24,6 +23,16 @@
class RabbitMQTransport(LoggingMixin, BaseTransport):
CONSUMER_RETRY_TIMEOUT = 5

_producer_connection = None
_producer_channel = None

@classmethod
def clean_connection(cls):
    """Close the cached producer connection (if still open) and drop the cached objects."""
    connection = cls._producer_connection
    if connection and not connection.is_closed:
        connection.close()

    # Forget both cached objects so the next produce() reconnects cleanly.
    cls._producer_connection = None
    cls._producer_channel = None

@classmethod
def consume(cls):
consumer_rabbit_settings = cls._get_consumer_settings()
Expand All @@ -43,28 +52,29 @@ def consume(cls):
logger.error('AMQP connection error. Reconnecting...')
time.sleep(cls.CONSUMER_RETRY_TIMEOUT)
finally:
if connection:
if connection and not connection.is_closed:
connection.close()

@classmethod
def produce(cls, payload):
# TODO: try to produce and reconnect several times, now leave as before
# if cannot publish message - drop it and try to reconnect on next event
rmq_settings = cls._get_common_settings()
exchange = rmq_settings[-1]

connection = None
try:
# Decided not to create context-manager to stay within the class
connection, channel = cls._get_producer_rmq_objects(*rmq_settings)
_, channel = cls._get_producer_rmq_objects(*rmq_settings)

cls._produce_message(channel, exchange, payload)
cls.log_produced(payload)
except (exceptions.AMQPError, exceptions.ChannelError, exceptions.ReentrancyError):
logger.error("CQRS couldn't be published: pk = {} ({}).".format(
payload.pk, payload.cqrs_id,
))
finally:
if connection:
connection.close()

# in case of any error - close connection and try to reconnect
cls.clean_connection()

@classmethod
def _consume_message(cls, ch, method, properties, body):
Expand Down Expand Up @@ -114,7 +124,7 @@ def _produce_message(cls, channel, exchange, payload):
properties=BasicProperties(
content_type='text/plain',
delivery_mode=2, # make message persistent
expiration='60000', # milliseconds
expiration=settings.CQRS.get('MESSAGE_TTL', '60000'), # milliseconds
)
)

Expand Down Expand Up @@ -159,18 +169,22 @@ def _get_consumer_rmq_objects(cls, host, port, creds, exchange, queue_name, pref

@classmethod
def _get_producer_rmq_objects(cls, host, port, creds, exchange):
connection = BlockingConnection(
ConnectionParameters(
host=host,
port=port,
credentials=creds,
blocked_connection_timeout=10,
),
)
channel = connection.channel()
cls._declare_exchange(channel, exchange)
if cls._producer_connection is None:
connection = BlockingConnection(
ConnectionParameters(
host=host,
port=port,
credentials=creds,
blocked_connection_timeout=10,
),
)
channel = connection.channel()
cls._declare_exchange(channel, exchange)

return connection, channel
cls._producer_connection = connection
cls._producer_channel = channel

return cls._producer_connection, cls._producer_channel

@staticmethod
def _declare_exchange(channel, exchange):
Expand Down
9 changes: 9 additions & 0 deletions integration_tests/tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@

from integration_tests.tests.utils import REPLICA_TABLES

from dj_cqrs.transport import current_transport


@pytest.fixture
def replica_cursor():
Expand All @@ -21,3 +23,10 @@ def replica_cursor():

cursor.close()
connection.close()


@pytest.fixture
def clean_rabbit_transport_connection():
    """Reset the transport's shared producer connection before the test runs.

    Setup-only fixture: closes/clears any connection cached by a previous
    test so each test starts from a fresh connection state.
    """
    current_transport.clean_connection()

    yield
18 changes: 12 additions & 6 deletions integration_tests/tests/test_asynchronous_consuming.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@


@pytest.mark.django_db(transaction=True)
def test_both_consumers_consume(replica_cursor):
def test_both_consumers_consume(settings, replica_cursor, clean_rabbit_transport_connection):
settings.CQRS['MESSAGE_TTL'] = '4000'
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 0
assert count_replica_rows(replica_cursor, REPLICA_EVENT_TABLE) == 0

Expand All @@ -23,7 +24,7 @@ def test_both_consumers_consume(replica_cursor):
])
BasicFieldsModel.call_post_bulk_create(master_instances)

transport_delay(3)
transport_delay(5)
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 9
assert count_replica_rows(replica_cursor, REPLICA_EVENT_TABLE) == 9

Expand All @@ -32,13 +33,18 @@ def test_both_consumers_consume(replica_cursor):


@pytest.mark.django_db(transaction=True)
def test_de_duplication(replica_cursor):
def test_de_duplication(settings, replica_cursor, clean_rabbit_transport_connection):
settings.CQRS['MESSAGE_TTL'] = '4000'
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 0
assert count_replica_rows(replica_cursor, REPLICA_EVENT_TABLE) == 0

master_instance = BasicFieldsModel.objects.create(int_field=1, char_field='text')
BasicFieldsModel.call_post_bulk_create([master_instance for _ in range(9)])
master_instance = BasicFieldsModel.objects.create(int_field=21, char_field='text')
BasicFieldsModel.call_post_bulk_create([master_instance])
transport_delay(5)

transport_delay(3)
replica_cursor.execute('TRUNCATE TABLE {};'.format(REPLICA_EVENT_TABLE))
BasicFieldsModel.call_post_bulk_create([master_instance for _ in range(10)])

transport_delay(5)
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 1
assert count_replica_rows(replica_cursor, REPLICA_EVENT_TABLE) == 10
2 changes: 1 addition & 1 deletion integration_tests/tests/test_bulk_operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@


@pytest.mark.django_db(transaction=True)
def test_flow(replica_cursor):
def test_flow(replica_cursor, clean_rabbit_transport_connection):
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 0

# Create
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/tests/test_single_basic_instance.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


@pytest.mark.django_db(transaction=True)
def test_flow(replica_cursor):
def test_flow(replica_cursor, clean_rabbit_transport_connection):
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 0

# Create
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/tests/test_sync_to_a_certain_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@


@pytest.mark.django_db(transaction=True)
def test_flow(replica_cursor, mocker):
def test_flow(replica_cursor, mocker, clean_rabbit_transport_connection):
assert count_replica_rows(replica_cursor, REPLICA_BASIC_TABLE) == 0

# Create
Expand Down

0 comments on commit 1b73302

Please sign in to comment.