[RFC-15][HUDI-1325] Merge updates of unsynced instants to metadata table
Ryan Pifer committed Dec 2, 2020
1 parent 7f84b12 commit 022a44c
Showing 8 changed files with 542 additions and 253 deletions.
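
The core of this change, visible in the first file below, is that the per-action conversion of timeline metadata into metadata-table records moves out of the writer into a new helper class, HoodieTableMetadataTimelineUtil. That class is among the changed files but is not rendered on this page; the outline below is only a sketch of its apparent surface, inferred from the call sites in the diff, with placeholder bodies standing in for the real implementations.

// Sketch only: signatures inferred from the call sites in the diff below; the actual
// HoodieTableMetadataTimelineUtil source is in one of the changed files not shown here.
import java.util.Collections;
import java.util.List;

import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.avro.model.HoodieCleanerPlan;
import org.apache.hudi.avro.model.HoodieRestoreMetadata;
import org.apache.hudi.avro.model.HoodieRollbackMetadata;
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.Option;

public class HoodieTableMetadataTimelineUtil {

  // Reads the given instant from the dataset timeline and converts it into records for the
  // metadata table; Option.empty() when there is nothing to sync (e.g. a savepoint).
  public static Option<List<HoodieRecord>> convertInstantToMetaRecords(HoodieTableMetaClient datasetMetaClient,
                                                                       HoodieInstant instant) {
    return Option.empty(); // placeholder body
  }

  // One overload per metadata type used by the writer's update(...) methods below.
  public static List<HoodieRecord> convertMetadataToRecords(HoodieCommitMetadata commitMetadata, String instantTime) {
    return Collections.emptyList(); // placeholder body
  }

  public static List<HoodieRecord> convertMetadataToRecords(HoodieCleanerPlan cleanerPlan, String instantTime) {
    return Collections.emptyList(); // placeholder body
  }

  public static List<HoodieRecord> convertMetadataToRecords(HoodieCleanMetadata cleanMetadata, String instantTime) {
    return Collections.emptyList(); // placeholder body
  }

  public static List<HoodieRecord> convertMetadataToRecords(HoodieRestoreMetadata restoreMetadata, String instantTime) {
    return Collections.emptyList(); // placeholder body
  }

  public static List<HoodieRecord> convertMetadataToRecords(HoodieRollbackMetadata rollbackMetadata, String instantTime) {
    return Collections.emptyList(); // placeholder body
  }
}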
@@ -44,18 +44,15 @@
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
import org.apache.hudi.common.table.view.TableFileSystemView.SliceView;
import org.apache.hudi.common.util.CleanerUtils;
import org.apache.hudi.common.util.HoodieTimer;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieCompactionConfig;
import org.apache.hudi.config.HoodieMetadataConfig;
import org.apache.hudi.config.HoodieMetricsConfig;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.exception.HoodieMetadataException;
import org.apache.hudi.metrics.DistributedRegistry;
@@ -85,7 +82,6 @@
import scala.Tuple2;

import static org.apache.hudi.metadata.HoodieTableMetadata.METADATA_TABLE_NAME_SUFFIX;
import static org.apache.hudi.metadata.HoodieTableMetadata.NON_PARTITIONED_NAME;
import static org.apache.hudi.metadata.HoodieTableMetadata.SOLO_COMMIT_TIMESTAMP;

/**
@@ -388,59 +384,11 @@ private void syncFromInstants(JavaSparkContext jsc, HoodieTableMetaClient datase
LOG.info("Syncing " + instantsToSync.size() + " instants to metadata table: " + instantsToSync);

// Read each instant in order and sync it to metadata table
final HoodieActiveTimeline timeline = datasetMetaClient.getActiveTimeline();
for (HoodieInstant instant : instantsToSync) {
LOG.info("Syncing instant " + instant + " to metadata table");

switch (instant.getAction()) {
case HoodieTimeline.CLEAN_ACTION: {
// CLEAN is synced from the
// - inflight instant which contains the HoodieCleanerPlan, or
// - complete instant which contains the HoodieCleanMetadata
try {
HoodieInstant inflightCleanInstant = new HoodieInstant(true, instant.getAction(), instant.getTimestamp());
ValidationUtils.checkArgument(inflightCleanInstant.isInflight());
HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(datasetMetaClient, inflightCleanInstant);
update(cleanerPlan, instant.getTimestamp());
} catch (HoodieIOException e) {
HoodieInstant cleanInstant = new HoodieInstant(false, instant.getAction(), instant.getTimestamp());
ValidationUtils.checkArgument(cleanInstant.isCompleted());
HoodieCleanMetadata cleanMetadata = CleanerUtils.getCleanerMetadata(datasetMetaClient, cleanInstant);
update(cleanMetadata, instant.getTimestamp());
}
break;
}
case HoodieTimeline.DELTA_COMMIT_ACTION:
case HoodieTimeline.COMMIT_ACTION:
case HoodieTimeline.COMPACTION_ACTION: {
ValidationUtils.checkArgument(instant.isCompleted());
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(
timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
update(commitMetadata, instant.getTimestamp());
break;
}
case HoodieTimeline.ROLLBACK_ACTION: {
ValidationUtils.checkArgument(instant.isCompleted());
HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.deserializeHoodieRollbackMetadata(
timeline.getInstantDetails(instant).get());
update(rollbackMetadata, instant.getTimestamp());
break;
}
case HoodieTimeline.RESTORE_ACTION: {
ValidationUtils.checkArgument(instant.isCompleted());
HoodieRestoreMetadata restoreMetadata = TimelineMetadataUtils.deserializeHoodieRestoreMetadata(
timeline.getInstantDetails(instant).get());
update(restoreMetadata, instant.getTimestamp());
break;
}
case HoodieTimeline.SAVEPOINT_ACTION: {
ValidationUtils.checkArgument(instant.isCompleted());
// Nothing to be done here
break;
}
default: {
throw new HoodieException("Unknown type of action " + instant.getAction());
}
Option<List<HoodieRecord>> records = HoodieTableMetadataTimelineUtil.convertInstantToMetaRecords(datasetMetaClient, instant);
if (records.isPresent()) {
commit(jsc, prepRecords(jsc, records.get(), MetadataPartitionType.FILES.partitionPath()), instant.getTimestamp());
}
}
// re-init the table metadata, for any future writes.
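
For comparison with the switch statement removed above, the following is a plausible sketch of how the new helper's convertInstantToMetaRecords could dispatch on the action type. It simply reuses the deserialization calls from the removed code (the imports dropped at the top of this file, such as CleanerUtils and TimelineMetadataUtils, presumably moved with it) and delegates to the per-type convertMetadataToRecords overloads. This is a reconstruction under those assumptions, not the actual contents of the new class.

// A reconstruction based on the switch removed above, not the actual implementation of
// HoodieTableMetadataTimelineUtil. Assumes the same deserialization utilities the writer
// previously used (CleanerUtils, TimelineMetadataUtils, HoodieCommitMetadata.fromBytes).
public static Option<List<HoodieRecord>> convertInstantToMetaRecords(HoodieTableMetaClient datasetMetaClient,
                                                                     HoodieInstant instant) throws IOException {
  HoodieActiveTimeline timeline = datasetMetaClient.getActiveTimeline();
  switch (instant.getAction()) {
    case HoodieTimeline.CLEAN_ACTION: {
      // CLEAN is synced from the inflight HoodieCleanerPlan when it is still readable,
      // otherwise from the HoodieCleanMetadata of the completed instant.
      try {
        HoodieInstant inflightCleanInstant = new HoodieInstant(true, instant.getAction(), instant.getTimestamp());
        HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(datasetMetaClient, inflightCleanInstant);
        return Option.of(convertMetadataToRecords(cleanerPlan, instant.getTimestamp()));
      } catch (HoodieIOException e) {
        HoodieInstant cleanInstant = new HoodieInstant(false, instant.getAction(), instant.getTimestamp());
        HoodieCleanMetadata cleanMetadata = CleanerUtils.getCleanerMetadata(datasetMetaClient, cleanInstant);
        return Option.of(convertMetadataToRecords(cleanMetadata, instant.getTimestamp()));
      }
    }
    case HoodieTimeline.DELTA_COMMIT_ACTION:
    case HoodieTimeline.COMMIT_ACTION:
    case HoodieTimeline.COMPACTION_ACTION: {
      HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(
          timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
      return Option.of(convertMetadataToRecords(commitMetadata, instant.getTimestamp()));
    }
    case HoodieTimeline.ROLLBACK_ACTION: {
      HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.deserializeHoodieRollbackMetadata(
          timeline.getInstantDetails(instant).get());
      return Option.of(convertMetadataToRecords(rollbackMetadata, instant.getTimestamp()));
    }
    case HoodieTimeline.RESTORE_ACTION: {
      HoodieRestoreMetadata restoreMetadata = TimelineMetadataUtils.deserializeHoodieRestoreMetadata(
          timeline.getInstantDetails(instant).get());
      return Option.of(convertMetadataToRecords(restoreMetadata, instant.getTimestamp()));
    }
    case HoodieTimeline.SAVEPOINT_ACTION:
      // Savepoints do not change the file listing, so there is nothing to sync.
      return Option.empty();
    default:
      throw new HoodieException("Unknown type of action " + instant.getAction());
  }
}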
@@ -462,38 +410,7 @@ public void update(HoodieCommitMetadata commitMetadata, String instantTime) {
return;
}

List<HoodieRecord> records = new LinkedList<>();
List<String> allPartitions = new LinkedList<>();
commitMetadata.getPartitionToWriteStats().forEach((partitionStatName, writeStats) -> {
final String partition = partitionStatName.equals("") ? NON_PARTITIONED_NAME : partitionStatName;
allPartitions.add(partition);

Map<String, Long> newFiles = new HashMap<>(writeStats.size());
writeStats.forEach(hoodieWriteStat -> {
String pathWithPartition = hoodieWriteStat.getPath();
if (pathWithPartition == null) {
// Empty partition
return;
}

int offset = partition.equals(NON_PARTITIONED_NAME) ? 0 : partition.length() + 1;
String filename = pathWithPartition.substring(offset);
ValidationUtils.checkState(!newFiles.containsKey(filename), "Duplicate files in HoodieCommitMetadata");
newFiles.put(filename, hoodieWriteStat.getTotalWriteBytes());
});

// New files added to a partition
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(
partition, Option.of(newFiles), Option.empty());
records.add(record);
});

// New partitions created
HoodieRecord record = HoodieMetadataPayload.createPartitionListRecord(new ArrayList<>(allPartitions));
records.add(record);

LOG.info("Updating at " + instantTime + " from Commit/" + commitMetadata.getOperationType()
+ ". #partitions_updated=" + records.size());
List<HoodieRecord> records = HoodieTableMetadataTimelineUtil.convertMetadataToRecords(commitMetadata, instantTime);
commit(jsc, prepRecords(jsc, records, MetadataPartitionType.FILES.partitionPath()), instantTime);
}

@@ -516,21 +433,7 @@ public void update(HoodieCleanerPlan cleanerPlan, String instantTime) {
return;
}

List<HoodieRecord> records = new LinkedList<>();
int[] fileDeleteCount = {0};
cleanerPlan.getFilePathsToBeDeletedPerPartition().forEach((partition, deletedPathInfo) -> {
fileDeleteCount[0] += deletedPathInfo.size();

// Files deleted from a partition
List<String> deletedFilenames = deletedPathInfo.stream().map(p -> new Path(p.getFilePath()).getName())
.collect(Collectors.toList());
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, Option.empty(),
Option.of(deletedFilenames));
records.add(record);
});

LOG.info("Updating at " + instantTime + " from CleanerPlan. #partitions_updated=" + records.size()
+ ", #files_deleted=" + fileDeleteCount[0]);
List<HoodieRecord> records = HoodieTableMetadataTimelineUtil.convertMetadataToRecords(cleanerPlan, instantTime);
commit(jsc, prepRecords(jsc, records, MetadataPartitionType.FILES.partitionPath()), instantTime);
}

@@ -546,21 +449,7 @@ public void update(HoodieCleanMetadata cleanMetadata, String instantTime) {
return;
}

List<HoodieRecord> records = new LinkedList<>();
int[] fileDeleteCount = {0};

cleanMetadata.getPartitionMetadata().forEach((partition, partitionMetadata) -> {
// Files deleted from a partition
List<String> deletedFiles = partitionMetadata.getSuccessDeleteFiles();
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, Option.empty(),
Option.of(new ArrayList<>(deletedFiles)));

records.add(record);
fileDeleteCount[0] += deletedFiles.size();
});

LOG.info("Updating at " + instantTime + " from Clean. #partitions_updated=" + records.size()
+ ", #files_deleted=" + fileDeleteCount[0]);
List<HoodieRecord> records = HoodieTableMetadataTimelineUtil.convertMetadataToRecords(cleanMetadata, instantTime);
commit(jsc, prepRecords(jsc, records, MetadataPartitionType.FILES.partitionPath()), instantTime);
}

@@ -576,12 +465,8 @@ public void update(HoodieRestoreMetadata restoreMetadata, String instantTime) {
return;
}

Map<String, Map<String, Long>> partitionToAppendedFiles = new HashMap<>();
Map<String, List<String>> partitionToDeletedFiles = new HashMap<>();
restoreMetadata.getHoodieRestoreMetadata().values().forEach(rms -> {
rms.forEach(rm -> processRollbackMetadata(rm, partitionToDeletedFiles, partitionToAppendedFiles));
});
commitRollback(jsc, partitionToDeletedFiles, partitionToAppendedFiles, instantTime, "Restore");
List<HoodieRecord> records = HoodieTableMetadataTimelineUtil.convertMetadataToRecords(restoreMetadata, instantTime);
commit(jsc, prepRecords(jsc, records, MetadataPartitionType.FILES.partitionPath()), instantTime);
}

/**
@@ -596,114 +481,7 @@ public void update(HoodieRollbackMetadata rollbackMetadata, String instantTime)
return;
}

Map<String, Map<String, Long>> partitionToAppendedFiles = new HashMap<>();
Map<String, List<String>> partitionToDeletedFiles = new HashMap<>();
processRollbackMetadata(rollbackMetadata, partitionToDeletedFiles, partitionToAppendedFiles);
commitRollback(jsc, partitionToDeletedFiles, partitionToAppendedFiles, instantTime, "Rollback");
}

/**
* Extracts information about the deleted and appended files from the {@code HoodieRollbackMetadata}.
*
* During a rollback, files may be deleted (COW, MOR) or rollback blocks may be appended (MOR only) to files. This
* function extracts the changed files for each partition.
*
* @param rollbackMetadata {@code HoodieRollbackMetadata}
* @param partitionToDeletedFiles The {@code Map} to fill with files deleted per partition.
* @param partitionToAppendedFiles The {@code Map} to fill with files appended per partition and their sizes.
*/
private void processRollbackMetadata(HoodieRollbackMetadata rollbackMetadata,
Map<String, List<String>> partitionToDeletedFiles,
Map<String, Map<String, Long>> partitionToAppendedFiles) {
rollbackMetadata.getPartitionMetadata().values().forEach(pm -> {
final String partition = pm.getPartitionPath();

if (!pm.getSuccessDeleteFiles().isEmpty()) {
if (!partitionToDeletedFiles.containsKey(partition)) {
partitionToDeletedFiles.put(partition, new ArrayList<>());
}

// Extract deleted file name from the absolute paths saved in getSuccessDeleteFiles()
List<String> deletedFiles = pm.getSuccessDeleteFiles().stream().map(p -> new Path(p).getName())
.collect(Collectors.toList());
partitionToDeletedFiles.get(partition).addAll(deletedFiles);
}

if (!pm.getAppendFiles().isEmpty()) {
if (!partitionToAppendedFiles.containsKey(partition)) {
partitionToAppendedFiles.put(partition, new HashMap<>());
}

// Extract appended file name from the absolute paths saved in getAppendFiles()
pm.getAppendFiles().forEach((path, size) -> {
partitionToAppendedFiles.get(partition).merge(new Path(path).getName(), size, (oldSize, newSizeCopy) -> {
return size + oldSize;
});
});
}
});
}

/**
* Create file delete records and commit.
*
* @param partitionToDeletedFiles {@code Map} of partitions and the deleted files
* @param instantTime Timestamp at which the deletes took place
* @param operation Type of the operation which caused the files to be deleted
*/
private void commitRollback(JavaSparkContext jsc, Map<String, List<String>> partitionToDeletedFiles,
Map<String, Map<String, Long>> partitionToAppendedFiles, String instantTime,
String operation) {
List<HoodieRecord> records = new LinkedList<>();
int[] fileChangeCount = {0, 0}; // deletes, appends

partitionToDeletedFiles.forEach((partition, deletedFiles) -> {
// Rollback deletes instants from the timeline. The instant being rolled back may not have been synced to the
// metadata table. Hence, the deleted files need to be checked against the metadata.
try {
FileStatus[] existingStatuses = metadata.fetchAllFilesInPartition(new Path(metadata.getDatasetBasePath(), partition));
Set<String> currentFiles =
Arrays.stream(existingStatuses).map(s -> s.getPath().getName()).collect(Collectors.toSet());

int origCount = deletedFiles.size();
deletedFiles.removeIf(f -> !currentFiles.contains(f));
if (deletedFiles.size() != origCount) {
LOG.warn("Some Files to be deleted as part of " + operation + " at " + instantTime + " were not found in the "
+ " metadata for partition " + partition
+ ". To delete = " + origCount + ", found=" + deletedFiles.size());
}

fileChangeCount[0] += deletedFiles.size();

Option<Map<String, Long>> filesAdded = Option.empty();
if (partitionToAppendedFiles.containsKey(partition)) {
filesAdded = Option.of(partitionToAppendedFiles.remove(partition));
}

HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, filesAdded,
Option.of(new ArrayList<>(deletedFiles)));
records.add(record);
} catch (IOException e) {
throw new HoodieMetadataException("Failed to commit rollback deletes at instant " + instantTime, e);
}
});

partitionToAppendedFiles.forEach((partition, appendedFileMap) -> {
fileChangeCount[1] += appendedFileMap.size();

// Validate that no appended file has been deleted
ValidationUtils.checkState(
!appendedFileMap.keySet().removeAll(partitionToDeletedFiles.getOrDefault(partition, Collections.emptyList())),
"Rollback file cannot both be appended and deleted");

// New files added to a partition
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, Option.of(appendedFileMap),
Option.empty());
records.add(record);
});

LOG.info("Updating at " + instantTime + " from " + operation + ". #partitions_updated=" + records.size()
+ ", #files_deleted=" + fileChangeCount[0] + ", #files_appended=" + fileChangeCount[1]);
List<HoodieRecord> records = HoodieTableMetadataTimelineUtil.convertMetadataToRecords(rollbackMetadata, instantTime);
commit(jsc, prepRecords(jsc, records, MetadataPartitionType.FILES.partitionPath()), instantTime);
}

@@ -37,6 +37,7 @@

import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@@ -70,6 +71,11 @@ public void setUp() throws Exception {
client = new CompactionAdminClient(jsc, basePath);
}

@AfterEach
public void cleanUp() throws Exception {
cleanupResources();
}

@Test
public void testUnscheduleCompactionPlan() throws Exception {
int numEntriesPerInstant = 10;
(Diffs for the remaining changed files are not rendered in this view.)
