added integration tests for purge decommissioned
RachanKaur committed Aug 10, 2023
1 parent 296fb0a · commit 4f6fe3e
Showing 4 changed files with 111 additions and 7 deletions.
8 changes: 6 additions & 2 deletions medusa/purge_decommissioned.py
@@ -23,13 +23,13 @@ def main(config):
    live_nodes = get_live_nodes(cassandra)

    # Get decommissioned nodes
-   decommissioned_nodes = all_nodes - live_nodes
+   decommissioned_nodes = get_decommissioned_nodes(all_nodes, live_nodes)

    for node in decommissioned_nodes:
        logging.info('Decommissioned node backups to purge: {}'.format(node))
        backups = storage.list_node_backups(fqdn=node)
        (nb_objects_purged, total_purged_size, total_objects_within_grace) \
-           = purge_backups(storage, backups, config.storage.backup_grace_period_in_days, config.storage.fqdn)
+           = purge_backups(storage, backups, config.storage.backup_grace_period_in_days, node)

    logging.debug('Emitting metrics')
    tags = ['medusa-decommissioned-node-backup', 'purge-error', 'PURGE-ERROR']
@@ -53,3 +53,7 @@ def get_live_nodes(cassandra):
    for host in cassandra.tokenmap.items():
        nodes.add(host)
    return nodes
+
+
+def get_decommissioned_nodes(all_nodes, live_nodes):
+    return all_nodes.difference(live_nodes)
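
Two fixes land in this file: the decommissioned set is computed by a named helper (making it unit-testable), and purge_backups is now keyed on each decommissioned node's fqdn rather than the local config.storage.fqdn, so the purge targets the right node's backups. A rough sketch of how the helpers compose follows; get_all_nodes is folded out of this diff, so its body here is an assumption inferred from the unit-test fixture further down, where 'index/' appears alongside node prefixes:

# Sketch only: get_all_nodes's real body is not shown in this commit; this
# version assumes root blobs are node prefixes like 'node1/' plus Medusa's
# 'index/' prefix, which is not a node and must be skipped.
def get_all_nodes(blobs):
    return {blob.rstrip('/') for blob in blobs if blob != 'index/'}


def get_decommissioned_nodes(all_nodes, live_nodes):
    return all_nodes.difference(live_nodes)


all_nodes = get_all_nodes(['index/', 'node1/', 'node2/', 'node3/'])
assert all_nodes == {'node1', 'node2', 'node3'}
assert get_decommissioned_nodes(all_nodes, {'node1', 'node3'}) == {'node2'}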
50 changes: 47 additions & 3 deletions tests/integration/features/integration_tests.feature
@@ -964,7 +964,51 @@ Feature: Integration tests
        Then I can verify the backup named "first_backup" with md5 checks "enabled" successfully


        @local
        Examples: Local storage
            | storage | client encryption      |
            | local   | with_client_encryption |

    @26
    Scenario Outline: Test purge of decommissioned nodes
        Given I have a fresh ccm cluster "<client encryption>" running named "scenario26"
        Given I am using "<storage>" as storage provider in ccm cluster "<client encryption>"
        When node "127.0.0.2" fakes a complete backup named "backup1" on "2019-04-15 12:12:00"
        Then I can see the backup named "backup1" when I list the backups
        When I create the "test" table in keyspace "medusa"
        When I perform a backup in "differential" mode of the node named "backup2" with md5 checks "disabled"
        Then checking the list of decommissioned nodes returns "127.0.0.2"
        When I run a purge on decommissioned nodes
        Then I cannot see the backup named "backup1" when I list the backups
        Then I can see the backup named "backup2" when I list the backups


        @local
        Examples: Local storage
            | storage | client encryption      |
            | local   | with_client_encryption |

        @s3
        Examples: S3 storage
            | storage           | client encryption         |
            | s3_us_west_oregon | without_client_encryption |

        @gcs
        Examples: Google Cloud Storage
            | storage        | client encryption         |
            | google_storage | without_client_encryption |

        @azure
        Examples: Azure Blob Storage
            | storage     | client encryption         |
            | azure_blobs | without_client_encryption |

        @ibm
        Examples: IBM Cloud Object Storage
            | storage     | client encryption         |
            | ibm_storage | without_client_encryption |

        @minio
        Examples: MinIO storage
            | storage | client encryption         |
            | minio   | without_client_encryption |
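
Scenario 26 hinges on an asymmetry: node "127.0.0.2" owns a (faked) backup in storage but never joins the ccm cluster's tokenmap, so it is exactly the set difference the purge should find, and each Examples table reruns the scenario against a different storage backend. A minimal sketch of the assertion the scenario encodes; treating the cluster as a single live node at 127.0.0.1 is an assumption about the ccm setup:

# Sketch of the set logic behind scenario 26; the live address 127.0.0.1 is
# an assumption about the single-node ccm cluster the test spins up.
nodes_with_backups = {'127.0.0.1', '127.0.0.2'}  # nodes owning blobs in storage
live_nodes = {'127.0.0.1'}                       # nodes present in the tokenmap
assert nodes_with_backups - live_nodes == {'127.0.0.2'}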
50 changes: 50 additions & 0 deletions tests/integration/features/steps/integration_steps.py
@@ -45,6 +45,7 @@
import medusa.index
import medusa.listing
import medusa.purge
+import medusa.purge_decommissioned
import medusa.report_latest
import medusa.restore_node
import medusa.service.grpc.client
@@ -69,6 +70,7 @@
from medusa.monitoring import LocalMonitoring
from medusa.service.grpc import medusa_pb2
from medusa.storage import Storage
+from medusa.cassandra_utils import Cassandra

storage_prefix = "{}-{}".format(datetime.datetime.now().isoformat(), str(uuid.uuid4()))
os.chdir("..")
@@ -1323,6 +1325,54 @@ def _i_modify_a_statistics_db_file(context, table, keyspace):
        file.write('Adding some additional characters')


@then(r'checking the list of decommissioned nodes returns "{expected_node}"')
def _checking_list_of_decommissioned_nodes(context, expected_node):
    # Get all nodes having backups
    storage = Storage(config=context.medusa_config.storage)
    blobs = storage.list_root_blobs()
    all_nodes = medusa.purge_decommissioned.get_all_nodes(blobs)

    # Get live nodes
    cassandra = Cassandra(config=context.medusa_config.cassandra)
    live_nodes = medusa.purge_decommissioned.get_live_nodes(cassandra)

    # Get decommissioned nodes
    decommissioned_nodes = medusa.purge_decommissioned.get_decommissioned_nodes(all_nodes, live_nodes)

    assert expected_node in decommissioned_nodes


@when(r'I run a purge on decommissioned nodes')
def _run_purge_on_decommissioned_nodes(context):
    try:
        logging.info('Starting decommissioned purge')
        storage = Storage(config=context.medusa_config.storage)
        cassandra = Cassandra(config=context.medusa_config.cassandra)

        # Get all nodes having backups
        blobs = storage.list_root_blobs()
        all_nodes = medusa.purge_decommissioned.get_all_nodes(blobs)

        # Get live nodes
        live_nodes = medusa.purge_decommissioned.get_live_nodes(cassandra)

        # Get decommissioned nodes
        decommissioned_nodes = medusa.purge_decommissioned.get_decommissioned_nodes(all_nodes, live_nodes)

        for node in decommissioned_nodes:
            logging.info('Decommissioned node backups to purge: {}'.format(node))
            backups = storage.list_node_backups(fqdn=node)
            (nb_objects_purged, total_purged_size, total_objects_within_grace) \
                = medusa.purge.purge_backups(storage,
                                             backups,
                                             context.medusa_config.storage.backup_grace_period_in_days,
                                             node)

    except Exception as e:
        logging.error('This error happened during the purge of decommissioned nodes: {}'.format(str(e)))
        raise e
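
This step re-implements the loop from medusa/purge_decommissioned.py's main() without the metrics emission. If metrics were safe to emit under the test harness, the step could arguably delegate to main() directly; a hedged sketch, with a hypothetical step name:

# Hypothetical alternative, not part of this commit: main(config) is defined
# in the first file of this diff and takes the full medusa config. Whether
# its metrics emission behaves under the test harness is an open assumption.
from behave import when

import medusa.purge_decommissioned


@when(r'I run the decommissioned purge via main')
def _run_decommissioned_purge_via_main(context):
    medusa.purge_decommissioned.main(context.medusa_config)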


def connect_cassandra(is_client_encryption_enable, tls_version=PROTOCOL_TLS):
    connected = False
    attempt = 0
10 changes: 8 additions & 2 deletions tests/purge_decommissioned_test.py
@@ -1,15 +1,15 @@
import unittest
from unittest.mock import patch
-from medusa.purge_decommissioned import get_all_nodes, get_live_nodes
+from medusa.purge_decommissioned import get_all_nodes, get_decommissioned_nodes, get_live_nodes


class TestGetNodes(unittest.TestCase):

    def test_get_all_nodes(self):
        blobs = [
+           'index/',
            'node1/',
            'node2/',
-           'index/',
            'node3/',
        ]
        nodes = get_all_nodes(blobs)
@@ -22,6 +22,12 @@ def test_get_live_nodes(self, mock_cassandra):
        nodes = get_live_nodes(mock_cassandra_instance)
        self.assertEqual(nodes, {"node1", "node3"})

    def test_get_decommissioned_nodes(self):
        all_nodes = {'node1', 'node2', 'node3', 'node4'}
        live_nodes = {'node1', 'node3'}
        decommissioned_nodes = get_decommissioned_nodes(all_nodes, live_nodes)
        self.assertEqual(decommissioned_nodes, {'node2', 'node4'})


if __name__ == '__main__':
    unittest.main()
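
The mock setup for test_get_live_nodes sits behind the diff fold above. Because get_live_nodes() receives the cassandra object as a parameter, the visible assertion can be reproduced with a plain MagicMock whose tokenmap.items() yields host names; the real test appears to use @patch instead, judging by its signature. A sketch under those assumptions:

# Sketch only: the committed test's fixture is hidden by the fold; this
# substitutes a MagicMock for the Cassandra object, matching the three
# visible lines of get_live_nodes (iterate tokenmap.items(), add each host).
import unittest
from unittest.mock import MagicMock

from medusa.purge_decommissioned import get_live_nodes


class TestGetLiveNodesSketch(unittest.TestCase):

    def test_get_live_nodes(self):
        mock_cassandra = MagicMock()
        mock_cassandra.tokenmap.items.return_value = ['node1', 'node3']
        self.assertEqual(get_live_nodes(mock_cassandra), {'node1', 'node3'})


if __name__ == '__main__':
    unittest.main()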
