From e9668f8f12f0e16944dd7f8ce5d4e20529e0c19f Mon Sep 17 00:00:00 2001
From: nk1506
Date: Wed, 25 Oct 2023 16:25:47 +0530
Subject: [PATCH] Hive: Refactor TestHiveCatalog tests to use the core
 CatalogTests

---
 build.gradle                                  |   1 +
 .../apache/iceberg/catalog/CatalogTests.java  |   8 +
 .../org/apache/iceberg/hive/HiveCatalog.java  |  14 +-
 .../iceberg/hive/HiveTableOperations.java     |   2 +-
 .../iceberg/hive/HiveMetastoreSetup.java      |  73 ++++
 .../apache/iceberg/hive/TestHiveCatalog.java  | 311 +++++++++---------
 6 files changed, 253 insertions(+), 156 deletions(-)
 create mode 100644 hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreSetup.java

diff --git a/build.gradle b/build.gradle
index 2698b00d384d..fd2665855bef 100644
--- a/build.gradle
+++ b/build.gradle
@@ -717,6 +717,7 @@ project(':iceberg-hive-metastore') {
     }
 
     testImplementation project(path: ':iceberg-api', configuration: 'testArtifacts')
+    testImplementation project(path: ':iceberg-core', configuration: 'testArtifacts')
   }
 }
 
diff --git a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
index 835115ad4d9c..28a2a8b9c86a 100644
--- a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
+++ b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java
@@ -163,6 +163,10 @@ protected boolean supportsNamesWithSlashes() {
     return true;
   }
 
+  protected boolean supportsNamesWithDot() {
+    return true;
+  }
+
   @Test
   public void testCreateNamespace() {
     C catalog = catalog();
@@ -469,6 +473,8 @@ public void testNamespaceWithSlash() {
 
   @Test
   public void testNamespaceWithDot() {
+    Assumptions.assumeTrue(supportsNamesWithDot());
+
     C catalog = catalog();
 
     Namespace withDot = Namespace.of("new.db");
@@ -546,6 +552,8 @@ public void testTableNameWithSlash() {
 
   @Test
   public void testTableNameWithDot() {
+    Assumptions.assumeTrue(supportsNamesWithDot());
+
     C catalog = catalog();
 
     TableIdentifier ident = TableIdentifier.of("ns", "ta.ble");
diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
index 22f5b0b5cf37..33954c7f792c 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java
@@ -261,6 +261,15 @@ public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new RuntimeException("Interrupted in call to rename", e);
+    } catch (RuntimeException e) {
+      // if the target table already exists, the Hive rename operation
+      // throws an exception of the form:
+      // java.lang.RuntimeException: InvalidOperationException(message:new table <> already exists)
+      if (e.getMessage().contains(String.format("new table %s already exists)", to))) {
+        throw new org.apache.iceberg.exceptions.AlreadyExistsException(
+            "Table already exists: %s", to);
+      }
+      throw new RuntimeException("Failed to rename " + from + " to " + to, e);
     }
   }
 
@@ -288,7 +297,7 @@ public void createNamespace(Namespace namespace, Map<String, String> meta) {
 
     } catch (AlreadyExistsException e) {
       throw new org.apache.iceberg.exceptions.AlreadyExistsException(
-          e, "Namespace '%s' already exists!", namespace);
+          e, "Namespace already exists: %s", namespace);
 
     } catch (TException e) {
       throw new RuntimeException(
@@ -500,6 +509,9 @@ protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
         return String.format("%s/%s", databaseData.getLocationUri(), tableIdentifier.name());
       }
+    } catch (NoSuchObjectException e) {
+      throw new NoSuchNamespaceException(
+          e, "Namespace does not exist: %s", tableIdentifier.namespace().levels()[0]);
     } catch (TException e) {
       throw new RuntimeException(
           String.format("Metastore operation failed for %s", tableIdentifier), e);
diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
index f4b96822d42c..34ef1a796106 100644
--- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
+++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveTableOperations.java
@@ -217,7 +217,7 @@ protected void doCommit(TableMetadata base, TableMetadata metadata) {
     String baseMetadataLocation = base != null ? base.metadataFileLocation() : null;
     if (!Objects.equals(baseMetadataLocation, metadataLocation)) {
       throw new CommitFailedException(
-          "Base metadata location '%s' is not same as the current table metadata location '%s' for %s.%s",
+          "Cannot commit: base metadata location '%s' is not the same as the current table metadata location '%s' for %s.%s",
           baseMetadataLocation, metadataLocation, database, tableName);
     }
 
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreSetup.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreSetup.java
new file mode 100644
index 000000000000..3d806a0a5925
--- /dev/null
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/HiveMetastoreSetup.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.hive;
+
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.CatalogUtil;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+
+/**
+ * Sets up a HiveMetastore. It does not create any database; each test should create the databases
+ * it needs. It should replace the existing setup class {@link HiveMetastoreTest}.
+ */
+class HiveMetastoreSetup {
+
+  protected HiveMetaStoreClient metastoreClient;
+  protected TestHiveMetastore metastore;
+  protected HiveConf hiveConf;
+  HiveCatalog catalog;
+
+  HiveMetastoreSetup(Map<String, String> hiveConfOverride) throws Exception {
+    metastore = new TestHiveMetastore();
+    HiveConf hiveConfWithOverrides = new HiveConf(TestHiveMetastore.class);
+    if (hiveConfOverride != null) {
+      for (Map.Entry<String, String> kv : hiveConfOverride.entrySet()) {
+        hiveConfWithOverrides.set(kv.getKey(), kv.getValue());
+      }
+    }
+
+    metastore.start(hiveConfWithOverrides);
+    hiveConf = metastore.hiveConf();
+    metastoreClient = new HiveMetaStoreClient(hiveConfWithOverrides);
+    catalog =
+        (HiveCatalog)
+            CatalogUtil.loadCatalog(
+                HiveCatalog.class.getName(),
+                CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
+                ImmutableMap.of(
+                    CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
+                    String.valueOf(TimeUnit.SECONDS.toMillis(10))),
+                hiveConfWithOverrides);
+  }
+
+  void stopMetastore() throws Exception {
+    try {
+      metastoreClient.close();
+      metastore.stop();
+    } finally {
+      catalog = null;
+      metastoreClient = null;
+      metastore = null;
+    }
+  }
+}
diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
index 7ff2bd78a665..fea216c0b420 100644
--- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
+++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java
@@ -37,6 +37,7 @@
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -68,11 +69,11 @@
 import org.apache.iceberg.Transaction;
 import org.apache.iceberg.UpdateSchema;
 import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.CatalogTests;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.exceptions.AlreadyExistsException;
 import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
-import org.apache.iceberg.exceptions.NoSuchNamespaceException;
 import org.apache.iceberg.exceptions.NoSuchTableException;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
@@ -82,12 +83,20 @@
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.JsonUtil;
 import org.apache.thrift.TException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
-public class TestHiveCatalog extends HiveMetastoreTest {
+/**
+ * Runs all the tests from the abstract {@link CatalogTests}, plus a few Hive-specific tests.
+ * Some tests may duplicate coverage already provided by {@link CatalogTests}.
+ * TODO: remove the tests duplicated by {@link CatalogTests}, and reuse the DB/TABLE/SCHEMA from
+ * {@link CatalogTests}.
+ */
+public class TestHiveCatalog extends CatalogTests<HiveCatalog> {
   private static ImmutableMap<String, String> meta =
       ImmutableMap.of(
           "owner", "apache",
@@ -96,6 +105,43 @@ public class TestHiveCatalog extends HiveMetastoreTest {
 
   @TempDir private Path temp;
 
+  private HiveMetastoreSetup hiveMetastoreSetup;
+
+  private static final String HIVE_DB_NAME = "hivedb";
+
+  @BeforeEach
+  public void before() throws Exception {
+    hiveMetastoreSetup = new HiveMetastoreSetup(Collections.emptyMap());
+    /*
+     * This test class previously used the metastore initialization from {@link HiveMetastoreTest},
+     * which requires a database to be created before any test runs,
+     * unlike {@link CatalogTests}, which creates a database for every test.
+     */
+    String dbPath = hiveMetastoreSetup.metastore.getDatabasePath(HIVE_DB_NAME);
+    Database db = new Database(HIVE_DB_NAME, "description", dbPath, Maps.newHashMap());
+    hiveMetastoreSetup.metastoreClient.createDatabase(db);
+  }
+
+  @AfterEach
+  public void after() throws Exception {
+    hiveMetastoreSetup.stopMetastore();
+  }
+
+  @Override
+  protected boolean requiresNamespaceCreate() {
+    return true;
+  }
+
+  @Override
+  protected boolean supportsNamesWithSlashes() {
+    return false;
+  }
+
+  @Override
+  protected boolean supportsNamesWithDot() {
+    return false;
+  }
+
   private Schema getTestSchema() {
     return new Schema(
         required(1, "id", Types.IntegerType.get(), "unique ID"),
@@ -106,12 +152,12 @@ private Schema getTestSchema() {
   public void testCreateTableBuilder() throws Exception {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
 
     try {
       Table table =
-          catalog
+          catalog()
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withLocation(location)
@@ -130,7 +176,7 @@ public void testCreateTableBuilder() throws Exception {
           TableProperties.PARQUET_COMPRESSION,
           TableProperties.PARQUET_COMPRESSION_DEFAULT_SINCE_1_4_0);
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
@@ -138,10 +184,10 @@ public void testCreateTableWithCaching() throws Exception {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
     ImmutableMap<String, String> properties = ImmutableMap.of("key1", "value1", "key2", "value2");
-    Catalog cachingCatalog = CachingCatalog.wrap(catalog);
+    Catalog cachingCatalog = CachingCatalog.wrap(catalog());
 
     try {
       Table table = cachingCatalog.createTable(tableIdent, schema, spec, location, properties);
@@ -197,20 +243,20 @@ public void testInitializeCatalogWithProperties() {
 
   @Test
   public void testCreateTableTxnBuilder() throws Exception {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
 
     try {
       Transaction txn =
-          catalog.buildTable(tableIdent, schema).withLocation(location).createTransaction();
+          catalog().buildTable(tableIdent, schema).withLocation(location).createTransaction();
       txn.commitTransaction();
 
-      Table table = catalog.loadTable(tableIdent);
+      Table table = catalog().loadTable(tableIdent);
       assertThat(table.location()).isEqualTo(location);
       assertThat(table.schema().columns()).hasSize(2);
       assertThat(table.spec().isUnpartitioned()).isTrue();
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
@@ -219,12 +265,12 @@ public void testReplaceTxnBuilder(int formatVersion) {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
 
     try {
       Transaction createTxn =
-          catalog
+          catalog()
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withLocation(location)
@@ -233,20 +279,20 @@
               .createOrReplaceTransaction();
       createTxn.commitTransaction();
 
-      Table table = catalog.loadTable(tableIdent);
+      Table table = catalog().loadTable(tableIdent);
       assertThat(table.spec().fields()).hasSize(1);
 
       String newLocation = temp.resolve("tbl-2").toString();
 
       Transaction replaceTxn =
-          catalog
+          catalog()
               .buildTable(tableIdent, schema)
               .withProperty("key2", "value2")
               .withLocation(newLocation)
               .replaceTransaction();
       replaceTxn.commitTransaction();
 
-      table = catalog.loadTable(tableIdent);
+      table = catalog().loadTable(tableIdent);
       assertThat(table.location()).isEqualTo(newLocation);
       assertThat(table.currentSnapshot()).isNull();
       if (formatVersion == 1) {
@@ -265,19 +311,19 @@
       assertThat(table.properties()).containsEntry("key1", "value1");
       assertThat(table.properties()).containsEntry("key2", "value2");
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
   @Test
   public void testCreateTableWithOwner() throws Exception {
     createTableAndVerifyOwner(
-        DB_NAME,
+        HIVE_DB_NAME,
         "tbl_specified_owner",
         ImmutableMap.of(HiveCatalog.HMS_TABLE_OWNER, "some_owner"),
         "some_owner");
     createTableAndVerifyOwner(
-        DB_NAME,
+        HIVE_DB_NAME,
         "tbl_default_owner",
         ImmutableMap.of(),
         UserGroupInformation.getCurrentUser().getShortUserName());
   }
 
@@ -291,13 +337,14 @@ private void createTableAndVerifyOwner(
     TableIdentifier tableIdent = TableIdentifier.of(db, tbl);
     String location = temp.resolve(tbl).toString();
     try {
-      Table table = catalog.createTable(tableIdent, schema, spec, location, properties);
-      org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(db, tbl);
+      Table table = catalog().createTable(tableIdent, schema, spec, location, properties);
+      org.apache.hadoop.hive.metastore.api.Table hmsTable =
+          hiveMetastoreSetup.metastoreClient.getTable(db, tbl);
       assertThat(hmsTable.getOwner()).isEqualTo(owner);
       Map<String, String> hmsTableParams = hmsTable.getParameters();
       assertThat(hmsTableParams).doesNotContainKey(HiveCatalog.HMS_TABLE_OWNER);
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
@@ -305,10 +352,10 @@ public void testCreateTableDefaultSortOrder() throws Exception {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     try {
-      Table table = catalog.createTable(tableIdent, schema, spec);
+      Table table = catalog().createTable(tableIdent, schema, spec);
 
       assertThat(table.sortOrder().orderId()).as("Order ID must match").isEqualTo(0);
       assertThat(table.sortOrder().isUnsorted()).as("Order must unsorted").isTrue();
@@ -316,7 +363,7 @@
           .as("Must not have default sort order in catalog")
           .doesNotContainKey(DEFAULT_SORT_ORDER);
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
@@ -325,11 +372,11 @@ public void testCreateTableCustomSortOrder() throws Exception {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
     SortOrder order = SortOrder.builderFor(schema).asc("id", NULLS_FIRST).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     try {
       Table table =
-          catalog
+          catalog()
               .buildTable(tableIdent, schema)
               .withPartitionSpec(spec)
               .withSortOrder(order)
@@ -349,26 +396,31 @@
       assertThat(hmsTableParameters())
           .containsEntry(DEFAULT_SORT_ORDER, SortOrderParser.toJson(table.sortOrder()));
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
+  @Override
+  protected HiveCatalog catalog() {
+    return hiveMetastoreSetup.catalog;
+  }
+
   @Test
-  public void testCreateNamespace() throws Exception {
+  public void testDatabaseAndNamespaceWithLocation() throws Exception {
     Namespace namespace1 = Namespace.of("noLocation");
-    catalog.createNamespace(namespace1, meta);
-    Database database1 = metastoreClient.getDatabase(namespace1.toString());
+    catalog().createNamespace(namespace1, meta);
+    Database database1 = hiveMetastoreSetup.metastoreClient.getDatabase(namespace1.toString());
 
     assertThat(database1.getParameters()).containsEntry("owner", "apache");
     assertThat(database1.getParameters()).containsEntry("group", "iceberg");
 
     assertThat(defaultUri(namespace1))
-        .as("There no same location for db and namespace")
+        .as("Database and namespace should have the same location")
        .isEqualTo(database1.getLocationUri());
 
-    assertThatThrownBy(() -> catalog.createNamespace(namespace1))
+    assertThatThrownBy(() -> catalog().createNamespace(namespace1))
         .isInstanceOf(AlreadyExistsException.class)
-        .hasMessage("Namespace '" + namespace1 + "' already exists!");
+        .hasMessage(String.format("Namespace already exists: %s", namespace1));
 
     String hiveLocalDir = temp.toFile().toURI().toString();
     // remove the trailing slash of the URI
     hiveLocalDir = hiveLocalDir.substring(0, hiveLocalDir.length() - 1);
@@ -379,10 +431,10 @@
             .buildOrThrow();
 
     Namespace namespace2 = Namespace.of("haveLocation");
-    catalog.createNamespace(namespace2, newMeta);
-    Database database2 = metastoreClient.getDatabase(namespace2.toString());
+    catalog().createNamespace(namespace2, newMeta);
+    Database database2 = hiveMetastoreSetup.metastoreClient.getDatabase(namespace2.toString());
     assertThat(hiveLocalDir)
-        .as("There no same location for db and namespace")
+        .as("Database and namespace should have the same location")
         .isEqualTo(database2.getLocationUri());
   }
 
@@ -459,8 +511,8 @@ private void createNamespaceAndVerifyOwnership(
       throws TException {
     Namespace namespace = Namespace.of(name);
 
-    catalog.createNamespace(namespace, prop);
-    Database db = metastoreClient.getDatabase(namespace.toString());
+    catalog().createNamespace(namespace, prop);
+    Database db = hiveMetastoreSetup.metastoreClient.getDatabase(namespace.toString());
 
     assertThat(db.getOwnerName()).isEqualTo(expectedOwner);
     assertThat(db.getOwnerType()).isEqualTo(expectedOwnerType);
@@ -470,13 +522,13 @@
   public void testListNamespace() throws TException {
     List<Namespace> namespaces;
     Namespace namespace1 = Namespace.of("dbname1");
-    catalog.createNamespace(namespace1, meta);
-    namespaces = catalog.listNamespaces(namespace1);
+    catalog().createNamespace(namespace1, meta);
+    namespaces = catalog().listNamespaces(namespace1);
     assertThat(namespaces).as("Hive db not hive the namespace 'dbname1'").isEmpty();
 
     Namespace namespace2 = Namespace.of("dbname2");
-    catalog.createNamespace(namespace2, meta);
-    namespaces = catalog.listNamespaces();
+    catalog().createNamespace(namespace2, meta);
+    namespaces = catalog().listNamespaces();
     assertThat(namespaces).as("Hive db not hive the namespace 'dbname2'").contains(namespace2);
   }
 
@@ -485,12 +537,12 @@
   public void testLoadNamespaceMeta() throws TException {
     Namespace namespace = Namespace.of("dbname_load");
 
-    catalog.createNamespace(namespace, meta);
+    catalog().createNamespace(namespace, meta);
 
-    Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
+    Map<String, String> nameMata = catalog().loadNamespaceMetadata(namespace);
     assertThat(nameMata).containsEntry("owner", "apache");
     assertThat(nameMata).containsEntry("group", "iceberg");
-    assertThat(catalog.convertToDatabase(namespace, meta).getLocationUri())
+    assertThat(catalog().convertToDatabase(namespace, meta).getLocationUri())
         .as("There no same location for db and namespace")
         .isEqualTo(nameMata.get("location"));
   }
 
@@ -499,38 +551,14 @@
   public void testNamespaceExists() throws TException {
     Namespace namespace = Namespace.of("dbname_exists");
 
-    catalog.createNamespace(namespace, meta);
+    catalog().createNamespace(namespace, meta);
 
-    assertThat(catalog.namespaceExists(namespace)).as("Should true to namespace exist").isTrue();
-    assertThat(catalog.namespaceExists(Namespace.of("db2", "db2", "ns2")))
+    assertThat(catalog().namespaceExists(namespace)).as("Should true to namespace exist").isTrue();
+    assertThat(catalog().namespaceExists(Namespace.of("db2", "db2", "ns2")))
         .as("Should false to namespace doesn't exist")
         .isFalse();
   }
 
-  @Test
-  public void testSetNamespaceProperties() throws TException {
-    Namespace namespace = Namespace.of("dbname_set");
-
-    catalog.createNamespace(namespace, meta);
-    catalog.setProperties(
-        namespace,
-        ImmutableMap.of(
-            "owner", "alter_apache",
-            "test", "test",
-            "location", "file:/data/tmp",
-            "comment", "iceberg test"));
-
-    Database database = metastoreClient.getDatabase(namespace.level(0));
-    assertThat(database.getParameters()).containsEntry("owner", "alter_apache");
-    assertThat(database.getParameters()).containsEntry("test", "test");
-    assertThat(database.getParameters()).containsEntry("group", "iceberg");
-
-    assertThatThrownBy(
-            () -> catalog.setProperties(Namespace.of("db2", "db2", "ns2"), ImmutableMap.of()))
-        .isInstanceOf(NoSuchNamespaceException.class)
-        .hasMessage("Namespace does not exist: db2.db2.ns2");
-  }
-
   @Test
   public void testSetNamespaceOwnership() throws TException {
     setNamespaceOwnershipAndVerify(
@@ -705,34 +733,13 @@ private void setNamespaceOwnershipAndVerify(
     createNamespaceAndVerifyOwnership(
         name, propToCreate, expectedOwnerPostCreate, expectedOwnerTypePostCreate);
 
-    catalog.setProperties(Namespace.of(name), propToSet);
-    Database database = metastoreClient.getDatabase(name);
+    catalog().setProperties(Namespace.of(name), propToSet);
+    Database database = hiveMetastoreSetup.metastoreClient.getDatabase(name);
 
     assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostSet);
     assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostSet);
   }
 
-  @Test
-  public void testRemoveNamespaceProperties() throws TException {
-    Namespace namespace = Namespace.of("dbname_remove");
-
-    catalog.createNamespace(namespace, meta);
-
-    catalog.removeProperties(namespace, ImmutableSet.of("comment", "owner"));
-
-    Database database = metastoreClient.getDatabase(namespace.level(0));
-
-    assertThat(database.getParameters()).doesNotContainKey("owner");
-    assertThat(database.getParameters()).containsEntry("group", "iceberg");
-
-    assertThatThrownBy(
-            () ->
-                catalog.removeProperties(
-                    Namespace.of("db2", "db2", "ns2"), ImmutableSet.of("comment", "owner")))
-        .isInstanceOf(NoSuchNamespaceException.class)
-        .hasMessage("Namespace does not exist: db2.db2.ns2");
-  }
-
   @Test
   public void testRemoveNamespaceOwnership() throws TException, IOException {
     removeNamespaceOwnershipAndVerify(
@@ -850,51 +857,46 @@ private void removeNamespaceOwnershipAndVerify(
     createNamespaceAndVerifyOwnership(
         name, propToCreate, expectedOwnerPostCreate, expectedOwnerTypePostCreate);
 
-    catalog.removeProperties(Namespace.of(name), propToRemove);
+    catalog().removeProperties(Namespace.of(name), propToRemove);
 
-    Database database = metastoreClient.getDatabase(name);
+    Database database = hiveMetastoreSetup.metastoreClient.getDatabase(name);
 
     assertThat(database.getOwnerName()).isEqualTo(expectedOwnerPostRemove);
     assertThat(database.getOwnerType()).isEqualTo(expectedOwnerTypePostRemove);
   }
 
   @Test
-  public void testDropNamespace() throws TException {
+  public void testDropEmptyNamespace() throws TException {
     Namespace namespace = Namespace.of("dbname_drop");
     TableIdentifier identifier = TableIdentifier.of(namespace, "table");
     Schema schema = getTestSchema();
 
-    catalog.createNamespace(namespace, meta);
-    catalog.createTable(identifier, schema);
-    Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
+    catalog().createNamespace(namespace, meta);
+    catalog().createTable(identifier, schema);
+    Map<String, String> nameMata = catalog().loadNamespaceMetadata(namespace);
     assertThat(nameMata).containsEntry("owner", "apache");
     assertThat(nameMata).containsEntry("group", "iceberg");
 
-    assertThatThrownBy(() -> catalog.dropNamespace(namespace))
+    assertThatThrownBy(() -> catalog().dropNamespace(namespace))
         .isInstanceOf(NamespaceNotEmptyException.class)
         .hasMessage("Namespace dbname_drop is not empty. One or more tables exist.");
 
-    assertThat(catalog.dropTable(identifier, true)).isTrue();
-    assertThat(catalog.dropNamespace(namespace))
+    assertThat(catalog().dropTable(identifier, true)).isTrue();
+    assertThat(catalog().dropNamespace(namespace))
         .as("Should fail to drop namespace if it is not empty")
         .isTrue();
-    assertThat(catalog.dropNamespace(Namespace.of("db.ns1")))
-        .as("Should fail to drop when namespace doesn't exist")
-        .isFalse();
-    assertThatThrownBy(() -> catalog.loadNamespaceMetadata(namespace))
-        .isInstanceOf(NoSuchNamespaceException.class)
-        .hasMessage("Namespace does not exist: dbname_drop");
   }
 
   @Test
   public void testDropTableWithoutMetadataFile() {
-    TableIdentifier identifier = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier identifier = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     Schema tableSchema = getTestSchema();
-    catalog.createTable(identifier, tableSchema);
-    String metadataFileLocation = catalog.newTableOps(identifier).current().metadataFileLocation();
-    TableOperations ops = catalog.newTableOps(identifier);
+    catalog().createTable(identifier, tableSchema);
+    String metadataFileLocation =
+        catalog().newTableOps(identifier).current().metadataFileLocation();
+    TableOperations ops = catalog().newTableOps(identifier);
     ops.io().deleteFile(metadataFileLocation);
-    assertThat(catalog.dropTable(identifier)).isTrue();
-    assertThatThrownBy(() -> catalog.loadTable(identifier))
+    assertThat(catalog().dropTable(identifier)).isTrue();
+    assertThatThrownBy(() -> catalog().loadTable(identifier))
         .isInstanceOf(NoSuchTableException.class)
         .hasMessageContaining("Table does not exist:");
   }
 
@@ -903,26 +905,26 @@
   public void testTableName() {
     Schema schema = getTestSchema();
     PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     try {
-      catalog.buildTable(tableIdent, schema).withPartitionSpec(spec).create();
+      catalog().buildTable(tableIdent, schema).withPartitionSpec(spec).create();
 
-      Table table = catalog.loadTable(tableIdent);
+      Table table = catalog().loadTable(tableIdent);
       assertThat(table.name()).as("Name must match").isEqualTo("hive.hivedb.tbl");
 
-      TableIdentifier snapshotsTableIdent = TableIdentifier.of(DB_NAME, "tbl", "snapshots");
-      Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
+      TableIdentifier snapshotsTableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl", "snapshots");
+      Table snapshotsTable = catalog().loadTable(snapshotsTableIdent);
       assertThat(snapshotsTable.name())
           .as("Name must match")
           .isEqualTo("hive.hivedb.tbl.snapshots");
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
   private String defaultUri(Namespace namespace) throws TException {
-    return metastoreClient.getConfigValue("hive.metastore.warehouse.dir", "")
+    return hiveMetastoreSetup.metastoreClient.getConfigValue("hive.metastore.warehouse.dir", "")
         + "/"
         + namespace.level(0)
         + ".db";
@@ -931,26 +933,26 @@
   public void testUUIDinTableProperties() throws Exception {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdentifier = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdentifier = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
 
     try {
-      catalog.buildTable(tableIdentifier, schema).withLocation(location).create();
+      catalog().buildTable(tableIdentifier, schema).withLocation(location).create();
 
       assertThat(hmsTableParameters()).containsKey(TableProperties.UUID);
     } finally {
-      catalog.dropTable(tableIdentifier);
+      catalog().dropTable(tableIdentifier);
     }
   }
 
   @Test
   public void testSnapshotStatsTableProperties() throws Exception {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdentifier = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdentifier = TableIdentifier.of(HIVE_DB_NAME, "tbl");
     String location = temp.resolve("tbl").toString();
 
     try {
-      catalog.buildTable(tableIdentifier, schema).withLocation(location).create();
+      catalog().buildTable(tableIdentifier, schema).withLocation(location).create();
 
       // check whether parameters are in expected state
       Map<String, String> parameters = hmsTableParameters();
       assertThat(parameters).containsEntry(SNAPSHOT_COUNT, "0");
       assertThat(parameters)
           .doesNotContainKey(CURRENT_SNAPSHOT_SUMMARY)
           .doesNotContainKey(CURRENT_SNAPSHOT_ID)
           .doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
 
       // create a snapshot
-      Table icebergTable = catalog.loadTable(tableIdentifier);
+      Table icebergTable = catalog().loadTable(tableIdentifier);
       String fileName = UUID.randomUUID().toString();
       DataFile file =
           DataFiles.builder(icebergTable.spec())
 
       icebergTable.newAppend().appendFile(file).commit();
 
       // check whether parameters are in expected state
       parameters = hmsTableParameters();
 
           CURRENT_SNAPSHOT_TIMESTAMP,
           String.valueOf(icebergTable.currentSnapshot().timestampMillis()));
     } finally {
-      catalog.dropTable(tableIdentifier);
+      catalog().dropTable(tableIdentifier);
     }
   }
 
@@ -993,7 +995,7 @@ public void testSetSnapshotSummary() throws Exception {
     Configuration conf = new Configuration();
     conf.set("iceberg.hive.table-property-max-size", "4000");
     HiveTableOperations ops =
-        new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
+        new HiveTableOperations(conf, null, null, catalog().name(), HIVE_DB_NAME, "tbl");
     Snapshot snapshot = mock(Snapshot.class);
     Map<String, String> summary = Maps.newHashMap();
     when(snapshot.summary()).thenReturn(summary);
@@ -1026,7 +1028,7 @@ public void testNotExposeTableProperties() {
     Configuration conf = new Configuration();
     conf.set("iceberg.hive.table-property-max-size", "0");
     HiveTableOperations ops =
-        new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
+        new HiveTableOperations(conf, null, null, catalog().name(), HIVE_DB_NAME, "tbl");
     TableMetadata metadata = mock(TableMetadata.class);
     Map<String, String> parameters = Maps.newHashMap();
     parameters.put(CURRENT_SNAPSHOT_SUMMARY, "summary");
@@ -1055,10 +1057,10 @@ public void testSetDefaultPartitionSpec() throws Exception {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     try {
-      Table table = catalog.buildTable(tableIdent, schema).create();
+      Table table = catalog().buildTable(tableIdent, schema).create();
       assertThat(hmsTableParameters())
           .as("Must not have default partition spec")
           .doesNotContainKey(TableProperties.DEFAULT_PARTITION_SPEC);
 
       table.updateSpec().addField(bucket("data", 16)).commit();
       assertThat(hmsTableParameters())
          .containsEntry(
              TableProperties.DEFAULT_PARTITION_SPEC, PartitionSpecParser.toJson(table.spec()));
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
   @Test
   public void testSetCurrentSchema() throws Exception {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     try {
-      Table table = catalog.buildTable(tableIdent, schema).create();
+      Table table = catalog().buildTable(tableIdent, schema).create();
 
       assertThat(hmsTableParameters())
           .containsEntry(CURRENT_SCHEMA, SchemaParser.toJson(table.schema()));
@@ -1093,12 +1095,13 @@
       assertThat(SchemaParser.toJson(table.schema()).length()).isGreaterThan(32672);
       assertThat(hmsTableParameters()).doesNotContainKey(CURRENT_SCHEMA);
     } finally {
-      catalog.dropTable(tableIdent);
+      catalog().dropTable(tableIdent);
     }
   }
 
   private Map<String, String> hmsTableParameters() throws TException {
-    org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(DB_NAME, "tbl");
+    org.apache.hadoop.hive.metastore.api.Table hmsTable =
+        hiveMetastoreSetup.metastoreClient.getTable(HIVE_DB_NAME, "tbl");
     return hmsTable.getParameters();
   }
 
@@ -1117,7 +1120,7 @@ public void testConstructorWarehousePathWithEndSlash() {
   @Test
   public void testTablePropsDefinedAtCatalogLevel() {
     Schema schema = getTestSchema();
-    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
+    TableIdentifier tableIdent = TableIdentifier.of(HIVE_DB_NAME, "tbl");
 
     ImmutableMap<String, String> catalogProps =
         ImmutableMap.of(
@@ -1131,7 +1134,7 @@
             HiveCatalog.class.getName(),
             CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
             catalogProps,
-            hiveConf);
+            hiveMetastoreSetup.hiveConf);
 
     try {
       Table table =
@@ -1182,32 +1185,32 @@ public void testDatabaseLocationWithSlashInWarehouseDir() {
 
   @Test
   public void testRegisterTable() {
-    TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1");
-    catalog.createTable(identifier, getTestSchema());
-    Table registeringTable = catalog.loadTable(identifier);
-    catalog.dropTable(identifier, false);
+    TableIdentifier identifier = TableIdentifier.of(HIVE_DB_NAME, "t1");
+    catalog().createTable(identifier, getTestSchema());
+    Table registeringTable = catalog().loadTable(identifier);
+    catalog().dropTable(identifier, false);
     TableOperations ops = ((HasTableOperations) registeringTable).operations();
     String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation();
-    Table registeredTable = catalog.registerTable(identifier, metadataLocation);
+    Table registeredTable = catalog().registerTable(identifier, metadataLocation);
     assertThat(registeredTable).isNotNull();
     TestHelpers.assertSerializedAndLoadedMetadata(registeringTable, registeredTable);
     String expectedMetadataLocation =
         ((HasTableOperations) registeredTable).operations().current().metadataFileLocation();
     assertThat(metadataLocation).isEqualTo(expectedMetadataLocation);
-    assertThat(catalog.loadTable(identifier)).isNotNull();
-    assertThat(catalog.dropTable(identifier)).isTrue();
+    assertThat(catalog().loadTable(identifier)).isNotNull();
+    assertThat(catalog().dropTable(identifier)).isTrue();
   }
 
   @Test
   public void testRegisterExistingTable() {
-    TableIdentifier identifier = TableIdentifier.of(DB_NAME, "t1");
-    catalog.createTable(identifier, getTestSchema());
-    Table registeringTable = catalog.loadTable(identifier);
+    TableIdentifier identifier = TableIdentifier.of(HIVE_DB_NAME, "t1");
+    catalog().createTable(identifier, getTestSchema());
+    Table registeringTable = catalog().loadTable(identifier);
     TableOperations ops = ((HasTableOperations) registeringTable).operations();
     String metadataLocation = ((HiveTableOperations) ops).currentMetadataLocation();
-    assertThatThrownBy(() -> catalog.registerTable(identifier, metadataLocation))
+    assertThatThrownBy(() -> catalog().registerTable(identifier, metadataLocation))
         .isInstanceOf(AlreadyExistsException.class)
         .hasMessage("Table already exists: hivedb.t1");
 
-    assertThat(catalog.dropTable(identifier, true)).isTrue();
+    assertThat(catalog().dropTable(identifier, true)).isTrue();
   }
 }