diff --git a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java index c38a6ed4c15f6..75ba97c835897 100644 --- a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java +++ b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java @@ -158,7 +158,7 @@ public void testSchemaConvertTimestampMicros() throws IOException { MessageType schema = Types.buildMessage().optional(PrimitiveType.PrimitiveTypeName.INT64) .as(OriginalType.TIMESTAMP_MICROS).named("my_element").named("my_timestamp"); String schemaString = HiveSchemaUtil.generateSchemaString(schema); - // verify backward compability - int64 converted to bigint type + // verify backward compatibility - int64 converted to bigint type assertEquals("`my_element` bigint", schemaString); // verify new functionality - int64 converted to timestamp type when 'supportTimestamp' is enabled schemaString = HiveSchemaUtil.generateSchemaString(schema, Collections.emptyList(), true); @@ -169,7 +169,7 @@ public void testSchemaConvertTimestampMicros() throws IOException { public void testSchemaDiffForTimestampMicros() { MessageType schema = Types.buildMessage().optional(PrimitiveType.PrimitiveTypeName.INT64) .as(OriginalType.TIMESTAMP_MICROS).named("my_element").named("my_timestamp"); - // verify backward compability - int64 converted to bigint type + // verify backward compatibility - int64 converted to bigint type SchemaDifference schemaDifference = HiveSchemaUtil.getSchemaDifference(schema, Collections.emptyMap(), Collections.emptyList(), false); assertEquals("bigint", schemaDifference.getAddColumnTypes().get("`my_element`")); @@ -207,7 +207,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata) assertEquals(5, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(), "Table partitions should match the number of 
partitions we wrote"); assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be updated in the TBLPROPERTIES"); + "The last commit that was synced should be updated in the TBLPROPERTIES"); // Adding of new partitions List newPartition = Arrays.asList("2050/01/01"); @@ -228,7 +228,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata) // Alter partitions // Manually change a hive partition location to check if the sync will detect - // it and generage a partition update event for it. + // it and generate a partition update event for it. hiveClient.updateHiveSQL("ALTER TABLE `" + HiveTestUtil.hiveSyncConfig.tableName + "` PARTITION (`datestr`='2050-01-01') SET LOCATION '/some/new/location'"); @@ -237,7 +237,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata) List writtenPartitionsSince = hiveClient.getPartitionsWrittenToSince(Option.empty()); writtenPartitionsSince.add(newPartition.get(0)); List partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince); - assertEquals(1, partitionEvents.size(), "There should be only one paritition event"); + assertEquals(1, partitionEvents.size(), "There should be only one partition event"); assertEquals(PartitionEventType.UPDATE, partitionEvents.iterator().next().eventType, "The one partition event must of type UPDATE"); @@ -247,7 +247,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata) List tablePartitions = hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName); assertEquals(6, tablePartitions.size(), "The one partition we wrote should be added to hive"); assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be 100"); + "The last commit that was synced should be 100"); } @ParameterizedTest @@ -264,9 +264,9 
@@ public void testSyncIncremental(boolean useJdbc) throws Exception { assertEquals(5, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(), "Table partitions should match the number of partitions we wrote"); assertEquals(commitTime1, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be updated in the TBLPROPERTIES"); + "The last commit that was synced should be updated in the TBLPROPERTIES"); - // Now lets create more parititions and these are the only ones which needs to be synced + // Now let's create more partitions and these are the only ones which need to be synced DateTime dateTime = DateTime.now().plusDays(6); String commitTime2 = "101"; HiveTestUtil.addCOWPartitions(1, true, true, dateTime, commitTime2); @@ -277,7 +277,7 @@ public void testSyncIncremental(boolean useJdbc) throws Exception { assertEquals(1, writtenPartitionsSince.size(), "We should have one partition written after 100 commit"); List hivePartitions = hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName); List partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince); - assertEquals(1, partitionEvents.size(), "There should be only one paritition event"); + assertEquals(1, partitionEvents.size(), "There should be only one partition event"); assertEquals(PartitionEventType.ADD, partitionEvents.iterator().next().eventType, "The one partition event must of type ADD"); tool = new HiveSyncTool(HiveTestUtil.hiveSyncConfig, HiveTestUtil.getHiveConf(), HiveTestUtil.fileSystem); @@ -286,7 +286,7 @@ public void testSyncIncremental(boolean useJdbc) throws Exception { assertEquals(6, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(), "The one partition we wrote should be added to hive"); assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be
101"); + "The last commit that was synced should be 101"); } @ParameterizedTest @@ -303,7 +303,7 @@ public void testSyncIncrementalWithSchemaEvolution(boolean useJdbc) throws Excep int fields = hiveClient.getTableSchema(HiveTestUtil.hiveSyncConfig.tableName).size(); - // Now lets create more parititions and these are the only ones which needs to be synced + // Now let's create more partitions and these are the only ones which need to be synced DateTime dateTime = DateTime.now().plusDays(6); String commitTime2 = "101"; HiveTestUtil.addCOWPartitions(1, false, true, dateTime, commitTime2); @@ -323,7 +323,7 @@ public void testSyncIncrementalWithSchemaEvolution(boolean useJdbc) throws Excep assertEquals(6, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(), "The one partition we wrote should be added to hive"); assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be 101"); + "The last commit that was synced should be 101"); } @ParameterizedTest @@ -359,7 +359,7 @@ public void testSyncMergeOnRead(boolean useJdbc, boolean useSchemaFromCommitMeta assertEquals(5, hiveClient.scanTablePartitions(roTableName).size(), "Table partitions should match the number of partitions we wrote"); assertEquals(deltaCommitTime, hiveClient.getLastCommitTimeSynced(roTableName).get(), - "The last commit that was sycned should be updated in the TBLPROPERTIES"); + "The last commit that was synced should be updated in the TBLPROPERTIES"); // Now lets create more partitions and these are the only ones which needs to be synced DateTime dateTime = DateTime.now().plusDays(6); @@ -432,7 +432,7 @@ public void testSyncMergeOnReadRT(boolean useJdbc, boolean useSchemaFromCommitMe assertEquals(deltaCommitTime, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(), "The last commit that was synced should be updated in the TBLPROPERTIES"); - // Now lets create more parititions and
these are the only ones which needs to be synced + // Now let's create more partitions and these are the only ones which need to be synced DateTime dateTime = DateTime.now().plusDays(6); String commitTime2 = "102"; String deltaCommitTime2 = "103"; @@ -459,7 +459,7 @@ public void testSyncMergeOnReadRT(boolean useJdbc, boolean useSchemaFromCommitMe assertEquals(6, hiveClientRT.scanTablePartitions(snapshotTableName).size(), "The 2 partitions we wrote should be added to hive"); assertEquals(deltaCommitTime2, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(), - "The last commit that was sycned should be 103"); + "The last commit that was synced should be 103"); } @ParameterizedTest @@ -489,7 +489,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception { assertEquals(5, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(), "Table partitions should match the number of partitions we wrote"); assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be updated in the TBLPROPERTIES"); + "The last commit that was synced should be updated in the TBLPROPERTIES"); // HoodieHiveClient had a bug where partition vals were sorted // and stored as keys in a map. The following tests this particular case.
@@ -502,7 +502,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception { assertEquals(1, writtenPartitionsSince.size(), "We should have one partition written after 100 commit"); List hivePartitions = hiveClient.scanTablePartitions(hiveSyncConfig.tableName); List partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince); - assertEquals(1, partitionEvents.size(), "There should be only one paritition event"); + assertEquals(1, partitionEvents.size(), "There should be only one partition event"); assertEquals(PartitionEventType.ADD, partitionEvents.iterator().next().eventType, "The one partition event must of type ADD"); tool = new HiveSyncTool(hiveSyncConfig, HiveTestUtil.getHiveConf(), HiveTestUtil.fileSystem); @@ -512,7 +512,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception { assertEquals(6, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(), "Table partitions should match the number of partitions we wrote"); assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be 101"); + "The last commit that was synced should be 101"); // create partition "2010/02/01" and ensure sync works String commitTime3 = "102"; @@ -532,7 +532,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception { assertEquals(7, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(), "Table partitions should match the number of partitions we wrote"); assertEquals(commitTime3, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(), - "The last commit that was sycned should be updated in the TBLPROPERTIES"); + "The last commit that was synced should be updated in the TBLPROPERTIES"); assertEquals(1, hiveClient.getPartitionsWrittenToSince(Option.of(commitTime2)).size()); } @@ -611,7 +611,7 @@ public void testReadSchemaForMOR(boolean useJdbc) throws Exception { // Sync should add the one partition 
assertEquals(6, hiveClientRT.scanTablePartitions(snapshotTableName).size(), "The 1 partition we wrote should be added to hive"); assertEquals(deltaCommitTime2, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(), - "The last commit that was sycned should be 103"); + "The last commit that was synced should be 103"); } @Test