diff --git a/astacus/coordinator/plugins/clickhouse/engines.py b/astacus/coordinator/plugins/clickhouse/engines.py
index 5a84d93d..6b546a12 100644
--- a/astacus/coordinator/plugins/clickhouse/engines.py
+++ b/astacus/coordinator/plugins/clickhouse/engines.py
@@ -11,4 +11,3 @@ class TableEngine(enum.Enum):
     PostgreSQL = "PostgreSQL"
     S3 = "S3"
     KeeperMap = "KeeperMap"
-    AzureBlobStorage = "AzureBlobStorage"
diff --git a/tests/integration/coordinator/plugins/clickhouse/test_plugin.py b/tests/integration/coordinator/plugins/clickhouse/test_plugin.py
index a8147f2c..9341621d 100644
--- a/tests/integration/coordinator/plugins/clickhouse/test_plugin.py
+++ b/tests/integration/coordinator/plugins/clickhouse/test_plugin.py
@@ -251,12 +251,6 @@ async def setup_cluster_content(clients: Sequence[HttpClickHouseClient], clickho
     )
     if await is_engine_available(clients[0], TableEngine.S3):
         await clients[0].execute(b"CREATE TABLE default.s3 (a Int) ENGINE = S3('http://bucket.s3.amazonaws.com/key.json')")
-
-    if await is_engine_available(clients[0], TableEngine.AzureBlobStorage):
-        await clients[0].execute(
-            b"CREATE TABLE default.azureblobstorage (a Int) ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=', 'test_container', 'test_table', 'CSV')"
-        )
-
     # add a function table
     await clients[0].execute(b"CREATE TABLE default.from_function_table AS numbers(3)")
     # add a table with data in object storage
@@ -584,7 +578,6 @@ async def test_cleanup_does_not_break_object_storage_disk_files(
         ("default.postgresql", TableEngine.PostgreSQL),
         ("default.mysql", TableEngine.MySQL),
         ("default.s3", TableEngine.S3),
-        ("default.azureblobstorage", TableEngine.AzureBlobStorage),
     ],
 )
 async def test_restores_integration_tables(