Skip to content

Commit

Permalink
[MINOR][hudi-sync] Fix typos (apache#2844)
Browse files Browse the repository at this point in the history
  • Loading branch information
RocMarshal authored Apr 19, 2021
1 parent 4e050cc commit f7b6b68
Showing 1 changed file with 20 additions and 20 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ public void testSchemaConvertTimestampMicros() throws IOException {
MessageType schema = Types.buildMessage().optional(PrimitiveType.PrimitiveTypeName.INT64)
.as(OriginalType.TIMESTAMP_MICROS).named("my_element").named("my_timestamp");
String schemaString = HiveSchemaUtil.generateSchemaString(schema);
// verify backward compability - int64 converted to bigint type
// verify backward compatibility - int64 converted to bigint type
assertEquals("`my_element` bigint", schemaString);
// verify new functionality - int64 converted to timestamp type when 'supportTimestamp' is enabled
schemaString = HiveSchemaUtil.generateSchemaString(schema, Collections.emptyList(), true);
Expand All @@ -169,7 +169,7 @@ public void testSchemaConvertTimestampMicros() throws IOException {
public void testSchemaDiffForTimestampMicros() {
MessageType schema = Types.buildMessage().optional(PrimitiveType.PrimitiveTypeName.INT64)
.as(OriginalType.TIMESTAMP_MICROS).named("my_element").named("my_timestamp");
// verify backward compability - int64 converted to bigint type
// verify backward compatibility - int64 converted to bigint type
SchemaDifference schemaDifference = HiveSchemaUtil.getSchemaDifference(schema,
Collections.emptyMap(), Collections.emptyList(), false);
assertEquals("bigint", schemaDifference.getAddColumnTypes().get("`my_element`"));
Expand Down Expand Up @@ -207,7 +207,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata)
assertEquals(5, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be updated in the TBLPROPERTIES");
"The last commit that was synced should be updated in the TBLPROPERTIES");

// Adding of new partitions
List<String> newPartition = Arrays.asList("2050/01/01");
Expand All @@ -228,7 +228,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata)

// Alter partitions
// Manually change a hive partition location to check if the sync will detect
// it and generage a partition update event for it.
// it and generate a partition update event for it.
hiveClient.updateHiveSQL("ALTER TABLE `" + HiveTestUtil.hiveSyncConfig.tableName
+ "` PARTITION (`datestr`='2050-01-01') SET LOCATION '/some/new/location'");

Expand All @@ -237,7 +237,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata)
List<String> writtenPartitionsSince = hiveClient.getPartitionsWrittenToSince(Option.empty());
writtenPartitionsSince.add(newPartition.get(0));
List<PartitionEvent> partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince);
assertEquals(1, partitionEvents.size(), "There should be only one paritition event");
assertEquals(1, partitionEvents.size(), "There should be only one partition event");
assertEquals(PartitionEventType.UPDATE, partitionEvents.iterator().next().eventType,
"The one partition event must be of type UPDATE");

Expand All @@ -247,7 +247,7 @@ public void testBasicSync(boolean useJdbc, boolean useSchemaFromCommitMetadata)
List<Partition> tablePartitions = hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName);
assertEquals(6, tablePartitions.size(), "The one partition we wrote should be added to hive");
assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be 100");
"The last commit that was synced should be 100");
}

@ParameterizedTest
Expand All @@ -264,9 +264,9 @@ public void testSyncIncremental(boolean useJdbc) throws Exception {
assertEquals(5, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(commitTime1, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be updated in the TBLPROPERTIES");
"The last commit that was synced should be updated in the TBLPROPERTIES");

// Now lets create more parititions and these are the only ones which needs to be synced
// Now lets create more partitions and these are the only ones which needs to be synced
DateTime dateTime = DateTime.now().plusDays(6);
String commitTime2 = "101";
HiveTestUtil.addCOWPartitions(1, true, true, dateTime, commitTime2);
Expand All @@ -277,7 +277,7 @@ public void testSyncIncremental(boolean useJdbc) throws Exception {
assertEquals(1, writtenPartitionsSince.size(), "We should have one partition written after 100 commit");
List<Partition> hivePartitions = hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName);
List<PartitionEvent> partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince);
assertEquals(1, partitionEvents.size(), "There should be only one paritition event");
assertEquals(1, partitionEvents.size(), "There should be only one partition event");
assertEquals(PartitionEventType.ADD, partitionEvents.iterator().next().eventType, "The one partition event must be of type ADD");

tool = new HiveSyncTool(HiveTestUtil.hiveSyncConfig, HiveTestUtil.getHiveConf(), HiveTestUtil.fileSystem);
Expand All @@ -286,7 +286,7 @@ public void testSyncIncremental(boolean useJdbc) throws Exception {
assertEquals(6, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(),
"The one partition we wrote should be added to hive");
assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be 101");
"The last commit that was synced should be 101");
}

@ParameterizedTest
Expand All @@ -303,7 +303,7 @@ public void testSyncIncrementalWithSchemaEvolution(boolean useJdbc) throws Excep

int fields = hiveClient.getTableSchema(HiveTestUtil.hiveSyncConfig.tableName).size();

// Now lets create more parititions and these are the only ones which needs to be synced
// Now lets create more partitions and these are the only ones which needs to be synced
DateTime dateTime = DateTime.now().plusDays(6);
String commitTime2 = "101";
HiveTestUtil.addCOWPartitions(1, false, true, dateTime, commitTime2);
Expand All @@ -323,7 +323,7 @@ public void testSyncIncrementalWithSchemaEvolution(boolean useJdbc) throws Excep
assertEquals(6, hiveClient.scanTablePartitions(HiveTestUtil.hiveSyncConfig.tableName).size(),
"The one partition we wrote should be added to hive");
assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(HiveTestUtil.hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be 101");
"The last commit that was synced should be 101");
}

@ParameterizedTest
Expand Down Expand Up @@ -359,7 +359,7 @@ public void testSyncMergeOnRead(boolean useJdbc, boolean useSchemaFromCommitMeta
assertEquals(5, hiveClient.scanTablePartitions(roTableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(deltaCommitTime, hiveClient.getLastCommitTimeSynced(roTableName).get(),
"The last commit that was sycned should be updated in the TBLPROPERTIES");
"The last commit that was synced should be updated in the TBLPROPERTIES");

// Now let's create more partitions and these are the only ones which need to be synced
DateTime dateTime = DateTime.now().plusDays(6);
Expand Down Expand Up @@ -432,7 +432,7 @@ public void testSyncMergeOnReadRT(boolean useJdbc, boolean useSchemaFromCommitMe
assertEquals(deltaCommitTime, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(),
"The last commit that was synced should be updated in the TBLPROPERTIES");

// Now lets create more parititions and these are the only ones which needs to be synced
// Now lets create more partitions and these are the only ones which needs to be synced
DateTime dateTime = DateTime.now().plusDays(6);
String commitTime2 = "102";
String deltaCommitTime2 = "103";
Expand All @@ -459,7 +459,7 @@ public void testSyncMergeOnReadRT(boolean useJdbc, boolean useSchemaFromCommitMe
assertEquals(6, hiveClientRT.scanTablePartitions(snapshotTableName).size(),
"The 2 partitions we wrote should be added to hive");
assertEquals(deltaCommitTime2, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(),
"The last commit that was sycned should be 103");
"The last commit that was synced should be 103");
}

@ParameterizedTest
Expand Down Expand Up @@ -489,7 +489,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception {
assertEquals(5, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be updated in the TBLPROPERTIES");
"The last commit that was synced should be updated in the TBLPROPERTIES");

// HoodieHiveClient had a bug where partition vals were sorted
// and stored as keys in a map. The following tests this particular case.
Expand All @@ -502,7 +502,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception {
assertEquals(1, writtenPartitionsSince.size(), "We should have one partition written after 100 commit");
List<Partition> hivePartitions = hiveClient.scanTablePartitions(hiveSyncConfig.tableName);
List<PartitionEvent> partitionEvents = hiveClient.getPartitionEvents(hivePartitions, writtenPartitionsSince);
assertEquals(1, partitionEvents.size(), "There should be only one paritition event");
assertEquals(1, partitionEvents.size(), "There should be only one partition event");
assertEquals(PartitionEventType.ADD, partitionEvents.iterator().next().eventType, "The one partition event must be of type ADD");

tool = new HiveSyncTool(hiveSyncConfig, HiveTestUtil.getHiveConf(), HiveTestUtil.fileSystem);
Expand All @@ -512,7 +512,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception {
assertEquals(6, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(commitTime2, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be 101");
"The last commit that was synced should be 101");

// create partition "2010/02/01" and ensure sync works
String commitTime3 = "102";
Expand All @@ -532,7 +532,7 @@ public void testMultiPartitionKeySync(boolean useJdbc) throws Exception {
assertEquals(7, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(),
"Table partitions should match the number of partitions we wrote");
assertEquals(commitTime3, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(),
"The last commit that was sycned should be updated in the TBLPROPERTIES");
"The last commit that was synced should be updated in the TBLPROPERTIES");
assertEquals(1, hiveClient.getPartitionsWrittenToSince(Option.of(commitTime2)).size());
}

Expand Down Expand Up @@ -611,7 +611,7 @@ public void testReadSchemaForMOR(boolean useJdbc) throws Exception {
// Sync should add the one partition
assertEquals(6, hiveClientRT.scanTablePartitions(snapshotTableName).size(), "The 1 partition we wrote should be added to hive");
assertEquals(deltaCommitTime2, hiveClientRT.getLastCommitTimeSynced(snapshotTableName).get(),
"The last commit that was sycned should be 103");
"The last commit that was synced should be 103");
}

@Test
Expand Down

0 comments on commit f7b6b68

Please sign in to comment.