[spark][core] spark compact with deletion vector
YannByron committed Aug 8, 2024
1 parent 265937e commit 49c2ba1
Showing 25 changed files with 549 additions and 160 deletions.
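For orientation: a deletion vector marks individual rows of a data file as deleted via small index files, so a compaction must merge data files and drop the marked rows at the same time. As a minimal sketch, creating an append-only table with the feature turned on might look like this, assuming Paimon's standard "deletion-vectors.enabled" option key (illustrative only, not part of this commit):

import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.schema.Schema;
import org.apache.paimon.types.DataTypes;

// Illustrative sketch; assumes an existing Catalog instance.
Schema schema =
        Schema.newBuilder()
                .column("id", DataTypes.INT())
                .column("data", DataTypes.STRING())
                .option("deletion-vectors.enabled", "true")
                .build();
catalog.createTable(Identifier.create("my_db", "my_table"), schema, false);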
AppendOnlyFileStore.java
@@ -19,6 +19,7 @@
package org.apache.paimon;

import org.apache.paimon.data.InternalRow;
import org.apache.paimon.deletionvectors.DeletionVectorsMaintainer;
import org.apache.paimon.format.FileFormatDiscover;
import org.apache.paimon.fs.FileIO;
import org.apache.paimon.manifest.ManifestCacheFilter;
@@ -93,6 +94,11 @@ public AppendOnlyFileStoreWrite newWrite(String commitUser) {
@Override
public AppendOnlyFileStoreWrite newWrite(
String commitUser, ManifestCacheFilter manifestFilter) {
DeletionVectorsMaintainer.Factory deletionVectorsMaintainerFactory = null;
if (options.deletionVectorsEnabled()) {
deletionVectorsMaintainerFactory =
new DeletionVectorsMaintainer.Factory(newIndexFileHandler());
}
return new AppendOnlyFileStoreWrite(
fileIO,
newRead(),
@@ -103,6 +109,8 @@ public AppendOnlyFileStoreWrite newWrite(
snapshotManager(),
newScan(true).withManifestCacheFilter(manifestFilter),
options,
bucketMode(),
deletionVectorsMaintainerFactory,
tableName);
}

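The maintainer factory wired in above is non-null only when deletion vectors are enabled. A rough sketch of how a writer might consume it; createOrRestore, notifyNewDeletion and writeDeletionVectorsIndex are assumptions about the maintainer API, not code from this diff:

// Hedged sketch: restore the maintainer for one partition/bucket, record one
// deleted row, then flush the rewritten deletion-vector index files.
DeletionVectorsMaintainer maintainer =
        deletionVectorsMaintainerFactory.createOrRestore(
                snapshotManager().latestSnapshotId(), partition, 0);
maintainer.notifyNewDeletion(dataFileName, rowPosition);
List<IndexFileMeta> newIndexFiles = maintainer.writeDeletionVectorsIndex();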
AppendOnlyCompactManager.java
@@ -23,9 +23,15 @@
import org.apache.paimon.compact.CompactFutureManager;
import org.apache.paimon.compact.CompactResult;
import org.apache.paimon.compact.CompactTask;
import org.apache.paimon.deletionvectors.DeletionVectorIndexFileMaintainer;
import org.apache.paimon.index.IndexFileMeta;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.io.IndexIncrement;
import org.apache.paimon.manifest.FileKind;
import org.apache.paimon.manifest.IndexManifestEntry;
import org.apache.paimon.operation.metrics.CompactionMetrics;
import org.apache.paimon.operation.metrics.MetricUtils;
import org.apache.paimon.table.source.DeletionFile;
import org.apache.paimon.utils.Preconditions;

import org.slf4j.Logger;
@@ -36,6 +42,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
@@ -52,6 +59,7 @@ public class AppendOnlyCompactManager extends CompactFutureManager {
private static final int FULL_COMPACT_MIN_FILE = 3;

private final ExecutorService executor;
private final DeletionVectorIndexFileMaintainer dvIndexFileMaintainer;
private final TreeSet<DataFileMeta> toCompact;
private final int minFileNum;
private final int maxFileNum;
@@ -65,12 +73,14 @@ public class AppendOnlyCompactManager extends CompactFutureManager {
public AppendOnlyCompactManager(
ExecutorService executor,
List<DataFileMeta> restored,
@Nullable DeletionVectorIndexFileMaintainer dvIndexFileMaintainer,
int minFileNum,
int maxFileNum,
long targetFileSize,
CompactRewriter rewriter,
@Nullable CompactionMetrics.Reporter metricsReporter) {
this.executor = executor;
this.dvIndexFileMaintainer = dvIndexFileMaintainer;
this.toCompact = new TreeSet<>(fileComparator(false));
this.toCompact.addAll(restored);
this.minFileNum = minFileNum;
@@ -94,13 +104,20 @@ private void triggerFullCompaction() {
taskFuture == null,
"A compaction task is still running while the user "
+ "forces a new compaction. This is unexpected.");
if (toCompact.size() < FULL_COMPACT_MIN_FILE) {
// if deletion vectors are enabled, always trigger compaction.
if (toCompact.isEmpty()
|| (dvIndexFileMaintainer == null && toCompact.size() < FULL_COMPACT_MIN_FILE)) {
return;
}

taskFuture =
executor.submit(
new FullCompactTask(toCompact, targetFileSize, rewriter, metricsReporter));
new FullCompactTask(
dvIndexFileMaintainer,
toCompact,
targetFileSize,
rewriter,
metricsReporter));
compacting = new ArrayList<>(toCompact);
toCompact.clear();
}
@@ -113,7 +130,9 @@ private void triggerCompactionWithBestEffort() {
if (picked.isPresent()) {
compacting = picked.get();
taskFuture =
executor.submit(new AutoCompactTask(compacting, rewriter, metricsReporter));
executor.submit(
new AutoCompactTask(
dvIndexFileMaintainer, compacting, rewriter, metricsReporter));
}
}

@@ -207,52 +226,63 @@ public void close() throws IOException {
/** A {@link CompactTask} impl for full compaction of append-only table. */
public static class FullCompactTask extends CompactTask {

private final LinkedList<DataFileMeta> inputs;
private final DeletionVectorIndexFileMaintainer dvIndexFileMaintainer;
private final LinkedList<DataFileMeta> toCompact;
private final long targetFileSize;
private final CompactRewriter rewriter;

public FullCompactTask(
DeletionVectorIndexFileMaintainer dvIndexFileMaintainer,
Collection<DataFileMeta> inputs,
long targetFileSize,
CompactRewriter rewriter,
@Nullable CompactionMetrics.Reporter metricsReporter) {
super(metricsReporter);
this.inputs = new LinkedList<>(inputs);
this.dvIndexFileMaintainer = dvIndexFileMaintainer;
this.toCompact = new LinkedList<>(inputs);
this.targetFileSize = targetFileSize;
this.rewriter = rewriter;
}

@Override
protected CompactResult doCompact() throws Exception {
// remove large files
while (!inputs.isEmpty()) {
DataFileMeta file = inputs.peekFirst();
if (file.fileSize() >= targetFileSize) {
inputs.poll();
while (!toCompact.isEmpty()) {
DataFileMeta file = toCompact.peekFirst();
// data files with deletion files always need to be compacted.
if (file.fileSize() >= targetFileSize && !hasDeletionFile(file)) {
toCompact.poll();
continue;
}
break;
}

// compute small files
int big = 0;
int small = 0;
for (DataFileMeta file : inputs) {
if (file.fileSize() >= targetFileSize) {
big++;
// do compaction
if (dvIndexFileMaintainer != null) {
// if deletion vectors are enabled, always compact.
return compact(dvIndexFileMaintainer, toCompact, rewriter);
} else {
// compute small files
int big = 0;
int small = 0;
for (DataFileMeta file : toCompact) {
if (file.fileSize() >= targetFileSize) {
big++;
} else {
small++;
}
}
if (small > big && toCompact.size() >= FULL_COMPACT_MIN_FILE) {
return compact(dvIndexFileMaintainer, toCompact, rewriter);
} else {
small++;
return result(Collections.emptyList(), Collections.emptyList());
}
}
}

// do compaction
List<DataFileMeta> compactBefore = new ArrayList<>();
List<DataFileMeta> compactAfter = new ArrayList<>();
if (small > big && inputs.size() >= FULL_COMPACT_MIN_FILE) {
compactBefore = new ArrayList<>(inputs);
compactAfter = rewriter.rewrite(inputs);
}
return result(new ArrayList<>(compactBefore), compactAfter);
private boolean hasDeletionFile(DataFileMeta file) {
return dvIndexFileMaintainer != null
&& dvIndexFileMaintainer.getDeletionFile(file.fileName()) != null;
}
}
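As a worked example of the branch above: with targetFileSize = 128 MB and inputs of 200 MB, 20 MB, 30 MB and 40 MB, none carrying deletion files, the 200 MB file is dropped from the candidates, leaving small = 3 and big = 0; since 3 > 0 and the candidate count reaches FULL_COMPACT_MIN_FILE (3), the three remaining files are rewritten. With a deletion-vector maintainer present, the size heuristic is bypassed and every candidate, including large files carrying deletion files, goes through compact(...).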

@@ -265,41 +295,87 @@ protected CompactResult doCompact() throws Exception {
*/
public static class AutoCompactTask extends CompactTask {

private final DeletionVectorIndexFileMaintainer dvIndexFileMaintainer;
private final List<DataFileMeta> toCompact;
private final CompactRewriter rewriter;

public AutoCompactTask(
DeletionVectorIndexFileMaintainer dvIndexFileMaintainer,
List<DataFileMeta> toCompact,
CompactRewriter rewriter,
@Nullable CompactionMetrics.Reporter metricsReporter) {
super(metricsReporter);
this.dvIndexFileMaintainer = dvIndexFileMaintainer;
this.toCompact = toCompact;
this.rewriter = rewriter;
}

@Override
protected CompactResult doCompact() throws Exception {
return compact(dvIndexFileMaintainer, toCompact, rewriter);
}
}

private static CompactResult compact(
DeletionVectorIndexFileMaintainer dvIndexFileMaintainer,
List<DataFileMeta> toCompact,
CompactRewriter rewriter)
throws Exception {
if (dvIndexFileMaintainer == null) {
return result(toCompact, rewriter.rewrite(toCompact));
} else {
List<DeletionFile> deletionFiles = new ArrayList<>();
for (DataFileMeta dataFile : toCompact) {
// TODO: confirm that fileName() is the correct key for this lookup
deletionFiles.add(dvIndexFileMaintainer.getDeletionFile(dataFile.fileName()));
}
List<DataFileMeta> compactAfter = rewriter.rewrite(toCompact, deletionFiles);
toCompact.forEach(f -> dvIndexFileMaintainer.notify(f.fileName()));

List<IndexManifestEntry> indexManifestEntries = dvIndexFileMaintainer.persist();
if (indexManifestEntries.isEmpty()) {
return result(toCompact, compactAfter);
} else {
List<IndexFileMeta> indexFilesBefore = new ArrayList<>();
List<IndexFileMeta> indexFilesAfter = new ArrayList<>();
for (IndexManifestEntry entry : indexManifestEntries) {
if (entry.kind() == FileKind.ADD) {
indexFilesAfter.add(entry.indexFile());
} else {
indexFilesBefore.add(entry.indexFile());
}
}
return result(toCompact, indexFilesBefore, compactAfter, indexFilesAfter);
}
}
}

private static CompactResult result(List<DataFileMeta> before, List<DataFileMeta> after) {
return new CompactResult() {
@Override
public List<DataFileMeta> before() {
return before;
}
return new CompactResult(before, after);
}

@Override
public List<DataFileMeta> after() {
return after;
}
};
private static CompactResult result(
List<DataFileMeta> before,
@Nullable List<IndexFileMeta> indexFilesBefore,
List<DataFileMeta> after,
@Nullable List<IndexFileMeta> indexFilesAfter) {
CompactResult result = new CompactResult(before, after);
if (indexFilesBefore != null || indexFilesAfter != null) {
IndexIncrement indexIncrement = new IndexIncrement(indexFilesAfter, indexFilesBefore);
result.setIndexIncrement(indexIncrement);
}
return result;
}
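A small sketch of how code in this class might call the four-argument result(...) above; all variable names are illustrative. Note the ordering: as constructed here, IndexIncrement receives the new index files first and the deleted ones second.

// Hedged sketch: two data files merged into one, one stale deletion-vector
// index file replaced by a rewritten one.
CompactResult r =
        result(
                Arrays.asList(beforeA, beforeB), // data files removed
                Collections.singletonList(staleDvIndex), // index files removed
                Collections.singletonList(mergedFile), // data files added
                Collections.singletonList(rewrittenDvIndex)); // index files added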

/** Compact rewriter for append-only table. */
public interface CompactRewriter {
List<DataFileMeta> rewrite(List<DataFileMeta> compactBefore) throws Exception;

default List<DataFileMeta> rewrite(
List<DataFileMeta> compactBefore, List<DeletionFile> deletionFiles)
throws Exception {
throw new UnsupportedOperationException(
        "rewrite with deletion files is not implemented");
}
}
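Implementations are expected to override the two-argument rewrite when deletion vectors are in play. A minimal sketch of a deletion-aware rewriter; readDeletionVector and copyLiveRows are hypothetical helpers standing in for Paimon's reader/writer plumbing, which this excerpt does not show:

import org.apache.paimon.deletionvectors.DeletionVector;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.table.source.DeletionFile;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hedged sketch of a deletion-aware CompactRewriter.
abstract class DvAwareRewriter implements AppendOnlyCompactManager.CompactRewriter {

    @Override
    public List<DataFileMeta> rewrite(List<DataFileMeta> compactBefore) throws Exception {
        // no deletion vectors: rewrite against an all-null deletion list
        return rewrite(compactBefore, Collections.nCopies(compactBefore.size(), null));
    }

    @Override
    public List<DataFileMeta> rewrite(
            List<DataFileMeta> compactBefore, List<DeletionFile> deletionFiles)
            throws Exception {
        List<DataFileMeta> compactAfter = new ArrayList<>();
        for (int i = 0; i < compactBefore.size(); i++) {
            DeletionFile deletionFile = deletionFiles.get(i);
            // a row at position p survives iff dv == null || !dv.isDeleted(p)
            DeletionVector dv =
                    deletionFile == null ? null : readDeletionVector(deletionFile);
            compactAfter.addAll(copyLiveRows(compactBefore.get(i), dv));
        }
        return compactAfter;
    }

    // hypothetical helpers, to be backed by real readers and writers
    protected abstract DeletionVector readDeletionVector(DeletionFile file) throws Exception;

    protected abstract List<DataFileMeta> copyLiveRows(DataFileMeta file, DeletionVector dv)
            throws Exception;
}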

/**
AppendOnlyCompactionTask.java
@@ -19,18 +19,26 @@
package org.apache.paimon.append;

import org.apache.paimon.data.BinaryRow;
import org.apache.paimon.deletionvectors.DeletionVectorIndexFileMaintainer;
import org.apache.paimon.index.IndexFileMeta;
import org.apache.paimon.io.CompactIncrement;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.io.DataIncrement;
import org.apache.paimon.io.IndexIncrement;
import org.apache.paimon.manifest.FileKind;
import org.apache.paimon.manifest.IndexManifestEntry;
import org.apache.paimon.operation.AppendOnlyFileStoreWrite;
import org.apache.paimon.table.FileStoreTable;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.table.sink.CommitMessageImpl;
import org.apache.paimon.table.source.DeletionFile;
import org.apache.paimon.utils.Preconditions;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/** Compaction task generated by {@link AppendOnlyTableCompactionCoordinator}. */
public class AppendOnlyCompactionTask {
@@ -40,9 +48,7 @@ public class AppendOnlyCompactionTask {
private final List<DataFileMeta> compactAfter;

public AppendOnlyCompactionTask(BinaryRow partition, List<DataFileMeta> files) {
Preconditions.checkArgument(
files != null && files.size() > 1,
"AppendOnlyCompactionTask need more than one file input.");
Preconditions.checkArgument(files != null);
this.partition = partition;
compactBefore = new ArrayList<>(files);
compactAfter = new ArrayList<>();
@@ -60,16 +66,56 @@ public List<DataFileMeta> compactAfter() {
return compactAfter;
}

public CommitMessage doCompact(AppendOnlyFileStoreWrite write) throws Exception {
compactAfter.addAll(write.compactRewriter(partition, 0).rewrite(compactBefore));
public CommitMessage doCompact(FileStoreTable table, AppendOnlyFileStoreWrite write)
throws Exception {
boolean dvEnabled = table.coreOptions().deletionVectorsEnabled();
Preconditions.checkArgument(
dvEnabled || compactBefore.size() > 1,
"AppendOnlyCompactionTask need more than one file input.");
IndexIncrement indexIncrement;
if (dvEnabled) {
DeletionVectorIndexFileMaintainer dvIndexFileMaintainer =
table.store()
.newIndexFileHandler()
.createDVIndexFileMaintainer(
table.snapshotManager().latestSnapshotId(),
partition,
0,
false);
List<DeletionFile> deletionFilesBefore =
compactBefore.stream()
.map(f -> dvIndexFileMaintainer.getDeletionFile(f.fileName()))
.collect(Collectors.toList());
compactAfter.addAll(
write.compactRewriter(partition, 0)
.rewrite(compactBefore, deletionFilesBefore));

compactBefore.forEach(f -> dvIndexFileMaintainer.notify(f.fileName()));
List<IndexManifestEntry> indexEntries = dvIndexFileMaintainer.persist();
Preconditions.checkArgument(
indexEntries.stream().noneMatch(i -> i.kind() == FileKind.ADD));
List<IndexFileMeta> removed =
indexEntries.stream()
.map(IndexManifestEntry::indexFile)
.collect(Collectors.toList());
indexIncrement = new IndexIncrement(Collections.emptyList(), removed);
} else {
compactAfter.addAll(write.compactRewriter(partition, 0).rewrite(compactBefore));
indexIncrement = new IndexIncrement(Collections.emptyList());
}

CompactIncrement compactIncrement =
new CompactIncrement(compactBefore, compactAfter, Collections.emptyList());
return new CommitMessageImpl(
partition,
0, // bucket 0 is used for unaware-bucket tables, kept for compatibility
// with the old design
DataIncrement.emptyIncrement(),
compactIncrement);
compactIncrement,
indexIncrement);
}

public int hashCode() {
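Call sites of doCompact now pass the table alongside the write. A rough usage sketch; obtaining the write via table.store().newWrite(...) and committing through table.newCommit(...) are assumptions about surrounding code not included in this excerpt:

// Hedged sketch: execute one coordinator-produced compaction task and commit it.
AppendOnlyFileStoreWrite write =
        (AppendOnlyFileStoreWrite) table.store().newWrite(commitUser);
CommitMessage message = task.doCompact(table, write);
try (TableCommitImpl commit = table.newCommit(commitUser)) {
    commit.commit(commitIdentifier, Collections.singletonList(message));
}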
