From e65e3ea827021388608217670eba73b26511f43b Mon Sep 17 00:00:00 2001
From: "liming.1018"
Date: Wed, 21 Aug 2024 17:37:42 +0800
Subject: [PATCH] [flink] support computing the changelog generated by
 compaction at read time.

This is used when the changelog producer is 'none' but CoreOptions#needLookup
is true and the table is used as a dim table.
---
 .../java/org/apache/paimon/CoreOptions.java   |   3 +-
 .../operation/AbstractFileStoreScan.java      |   5 +
 .../paimon/operation/FileStoreScan.java       |   3 +
 .../table/source/AbstractDataTableScan.java   |   1 +
 .../apache/paimon/table/source/DataSplit.java |  15 +++
 .../table/source/DataTableStreamScan.java     |   3 +
 .../table/source/KeyValueTableRead.java       |   3 +
 .../snapshot/CompactionFollowUpScanner.java   |  49 +++++++++
 .../source/snapshot/SnapshotReaderImpl.java   |   3 +-
 .../IncrementalCompactDiffReadProvider.java   |  68 ++++++++++++
 .../IncrementalCompactDiffSplitRead.java      |  77 ++++++++++++++
 .../apache/paimon/table/TableTestBase.java    |  13 ++-
 .../flink/lookup/FileStoreLookupFunction.java |   4 +-
 .../flink/lookup/LookupStreamingReader.java   |  14 ++-
 .../paimon/flink/utils/TableScanUtils.java    |  19 ++++
 .../paimon/flink/lookup/LookupTableTest.java  | 100 ++++++++++++++++++
 16 files changed, 375 insertions(+), 5 deletions(-)
 create mode 100644 paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/CompactionFollowUpScanner.java
 create mode 100644 paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffReadProvider.java
 create mode 100644 paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffSplitRead.java

diff --git a/paimon-common/src/main/java/org/apache/paimon/CoreOptions.java b/paimon-common/src/main/java/org/apache/paimon/CoreOptions.java
index ecef7a9cbe9cb..bd1c6eed70103 100644
--- a/paimon-common/src/main/java/org/apache/paimon/CoreOptions.java
+++ b/paimon-common/src/main/java/org/apache/paimon/CoreOptions.java
@@ -2362,7 +2362,8 @@ public static StreamingReadMode fromValue(String value) {
     public enum StreamScanMode implements DescribedEnum {
         NONE("none", "No requirement."),
         COMPACT_BUCKET_TABLE("compact-bucket-table", "Compaction for traditional bucket table."),
-        FILE_MONITOR("file-monitor", "Monitor data file changes.");
+        FILE_MONITOR("file-monitor", "Monitor data file changes."),
+        COMPACT_DELTA_MONITOR("compact-delta-monitor", "Monitor delta changes for compaction.");

         private final String value;
         private final String description;
diff --git a/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreScan.java b/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreScan.java
index ec3d4a2392c5a..9694bff2aa3c5 100644
--- a/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreScan.java
+++ b/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreScan.java
@@ -249,6 +249,11 @@ public ScanMode scanMode() {
             public List<ManifestEntry> files() {
                 return files;
             }
+
+            @Override
+            public Snapshot.CommitKind commitKind() {
+                return readSnapshot == null ? null : readSnapshot.commitKind();
+            }
         };
     }
diff --git a/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreScan.java b/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreScan.java
index c7b0e8cdf73f4..5257335ca9b1b 100644
--- a/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreScan.java
+++ b/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreScan.java
@@ -127,6 +127,9 @@ default List<ManifestEntry> files(FileKind kind) {
         return files().stream().filter(e -> e.kind() == kind).collect(Collectors.toList());
     }

+    /** {@link org.apache.paimon.Snapshot.CommitKind} of the snapshot. */
+    Snapshot.CommitKind commitKind();
+
     /** Return a map group by partition and bucket. */
     static Map<BinaryRow, Map<Integer, List<DataFileMeta>>> groupByPartFiles(
             List<ManifestEntry> files) {
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/AbstractDataTableScan.java b/paimon-core/src/main/java/org/apache/paimon/table/source/AbstractDataTableScan.java
index ba1bc6588f689..63cd38a73e4e8 100644
--- a/paimon-core/src/main/java/org/apache/paimon/table/source/AbstractDataTableScan.java
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/AbstractDataTableScan.java
@@ -117,6 +117,7 @@ protected StartingScanner createStartingScanner(boolean isStreaming) {
                         isStreaming, "Set 'streaming-compact' in batch mode. This is unexpected.");
                 return new ContinuousCompactorStartingScanner(snapshotManager);
             case FILE_MONITOR:
+            case COMPACT_DELTA_MONITOR:
                 return new FullStartingScanner(snapshotManager);
         }
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/DataSplit.java b/paimon-core/src/main/java/org/apache/paimon/table/source/DataSplit.java
index 067bf055c2417..731fd02906bc8 100644
--- a/paimon-core/src/main/java/org/apache/paimon/table/source/DataSplit.java
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/DataSplit.java
@@ -18,6 +18,7 @@

 package org.apache.paimon.table.source;

+import org.apache.paimon.Snapshot;
 import org.apache.paimon.data.BinaryRow;
 import org.apache.paimon.io.DataFileMeta;
 import org.apache.paimon.io.DataFileMeta08Serializer;
@@ -65,6 +66,9 @@ public class DataSplit implements Split {
     private boolean isStreaming = false;
     private boolean rawConvertible;

+    // Internal use only, not involved in serde.
+    @Nullable private transient Snapshot.CommitKind commitKind;
+
     public DataSplit() {}

     public long snapshotId() {
@@ -108,6 +112,11 @@ public boolean rawConvertible() {
         return rawConvertible;
     }

+    @Nullable
+    public Snapshot.CommitKind commitKind() {
+        return commitKind;
+    }
+
     public OptionalLong latestFileCreationEpochMillis() {
         return this.dataFiles.stream().mapToLong(DataFileMeta::creationTimeEpochMillis).max();
     }
@@ -230,6 +239,7 @@ private void assign(DataSplit other) {
         this.dataDeletionFiles = other.dataDeletionFiles;
         this.isStreaming = other.isStreaming;
         this.rawConvertible = other.rawConvertible;
+        this.commitKind = other.commitKind;
     }

     public void serialize(DataOutputView out) throws IOException {
@@ -387,6 +397,11 @@ public Builder rawConvertible(boolean rawConvertible) {
             return this;
         }

+        public Builder withCommitKind(Snapshot.CommitKind commitKind) {
+            this.split.commitKind = commitKind;
+            return this;
+        }
+
         public DataSplit build() {
             checkArgument(split.partition != null);
             checkArgument(split.bucket != -1);
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/DataTableStreamScan.java b/paimon-core/src/main/java/org/apache/paimon/table/source/DataTableStreamScan.java
index f315bdfa9f1b8..e607e0d94c4ae 100644
--- a/paimon-core/src/main/java/org/apache/paimon/table/source/DataTableStreamScan.java
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/DataTableStreamScan.java
@@ -27,6 +27,7 @@
 import org.apache.paimon.table.source.snapshot.AllDeltaFollowUpScanner;
 import org.apache.paimon.table.source.snapshot.BoundedChecker;
 import org.apache.paimon.table.source.snapshot.CompactionChangelogFollowUpScanner;
+import org.apache.paimon.table.source.snapshot.CompactionFollowUpScanner;
 import org.apache.paimon.table.source.snapshot.DeltaFollowUpScanner;
 import org.apache.paimon.table.source.snapshot.FollowUpScanner;
 import org.apache.paimon.table.source.snapshot.InputChangelogFollowUpScanner;
@@ -227,6 +228,8 @@ private FollowUpScanner createFollowUpScanner() {
                 return new DeltaFollowUpScanner();
             case FILE_MONITOR:
                 return new AllDeltaFollowUpScanner();
+            case COMPACT_DELTA_MONITOR:
+                return new CompactionFollowUpScanner();
         }

         CoreOptions.ChangelogProducer changelogProducer = options.changelogProducer();
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/KeyValueTableRead.java b/paimon-core/src/main/java/org/apache/paimon/table/source/KeyValueTableRead.java
index c674e4792d436..0b8e5c7a6e497 100644
--- a/paimon-core/src/main/java/org/apache/paimon/table/source/KeyValueTableRead.java
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/KeyValueTableRead.java
@@ -28,6 +28,7 @@
 import org.apache.paimon.reader.RecordReader;
 import org.apache.paimon.schema.TableSchema;
 import org.apache.paimon.table.source.splitread.IncrementalChangelogReadProvider;
+import org.apache.paimon.table.source.splitread.IncrementalCompactDiffReadProvider;
 import org.apache.paimon.table.source.splitread.IncrementalDiffReadProvider;
 import org.apache.paimon.table.source.splitread.MergeFileSplitReadProvider;
 import org.apache.paimon.table.source.splitread.RawFileSplitReadProvider;
@@ -62,6 +63,8 @@ public KeyValueTableRead(
                 Arrays.asList(
                         new RawFileSplitReadProvider(batchRawReadSupplier, this::assignValues),
                         new MergeFileSplitReadProvider(mergeReadSupplier, this::assignValues),
+                        new IncrementalCompactDiffReadProvider(
+                                mergeReadSupplier, this::assignValues),
                         new IncrementalChangelogReadProvider(mergeReadSupplier, this::assignValues),
                        new IncrementalDiffReadProvider(mergeReadSupplier, this::assignValues));
     }
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/CompactionFollowUpScanner.java b/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/CompactionFollowUpScanner.java
new file mode 100644
index 0000000000000..473978117cebc
--- /dev/null
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/CompactionFollowUpScanner.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.paimon.table.source.snapshot;
+
+import org.apache.paimon.Snapshot;
+import org.apache.paimon.table.source.ScanMode;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** A {@link FollowUpScanner} that reads all files changed by a compaction. */
+public class CompactionFollowUpScanner implements FollowUpScanner {
+
+    private static final Logger LOG = LoggerFactory.getLogger(CompactionFollowUpScanner.class);
+
+    @Override
+    public boolean shouldScanSnapshot(Snapshot snapshot) {
+        if (snapshot.commitKind() == Snapshot.CommitKind.COMPACT) {
+            return true;
+        }
+
+        LOG.debug(
+                "Next snapshot id {} is not COMPACT, but is {}, checking the next one.",
+                snapshot.id(),
+                snapshot.commitKind());
+        return false;
+    }
+
+    @Override
+    public SnapshotReader.Plan scan(Snapshot snapshot, SnapshotReader snapshotReader) {
+        return snapshotReader.withMode(ScanMode.DELTA).withSnapshot(snapshot).readChanges();
+    }
+}
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/SnapshotReaderImpl.java b/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/SnapshotReaderImpl.java
index ef3523dfdeaa3..715a1b6e76dc7 100644
--- a/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/SnapshotReaderImpl.java
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/snapshot/SnapshotReaderImpl.java
@@ -398,7 +398,8 @@ private Plan toChangesPlan(
                         .withBeforeFiles(before)
                         .withDataFiles(data)
                         .isStreaming(isStreaming)
-                        .withBucketPath(pathFactory.bucketPath(part, bucket).toString());
+                        .withBucketPath(pathFactory.bucketPath(part, bucket).toString())
+                        .withCommitKind(plan.commitKind());
             if (deletionVectors) {
                 builder.withBeforeDeletionFiles(
                         getDeletionFiles(
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffReadProvider.java b/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffReadProvider.java
new file mode 100644
index 0000000000000..12ce12a75f14f
--- /dev/null
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffReadProvider.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.paimon.table.source.splitread;
+
+import org.apache.paimon.Snapshot;
+import org.apache.paimon.data.InternalRow;
+import org.apache.paimon.operation.MergeFileSplitRead;
+import org.apache.paimon.operation.SplitRead;
+import org.apache.paimon.table.source.DataSplit;
+import org.apache.paimon.utils.LazyField;
+
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+/** A {@link SplitReadProvider} for streaming reads of the incremental diff produced by compaction. */
+public class IncrementalCompactDiffReadProvider implements SplitReadProvider {
+
+    private final LazyField<SplitRead<InternalRow>> splitRead;
+
+    public IncrementalCompactDiffReadProvider(
+            Supplier<MergeFileSplitRead> supplier,
+            Consumer<SplitRead<InternalRow>> valuesAssigner) {
+        this.splitRead =
+                new LazyField<>(
+                        () -> {
+                            SplitRead<InternalRow> read = create(supplier);
+                            valuesAssigner.accept(read);
+                            return read;
+                        });
+    }
+
+    private SplitRead<InternalRow> create(Supplier<MergeFileSplitRead> supplier) {
+        return new IncrementalCompactDiffSplitRead(supplier.get());
+    }
+
+    @Override
+    public boolean match(DataSplit split, boolean forceKeepDelete) {
+        return split.commitKind() == Snapshot.CommitKind.COMPACT
+                && !split.beforeFiles().isEmpty()
+                && split.isStreaming();
+    }
+
+    @Override
+    public boolean initialized() {
+        return splitRead.initialized();
+    }
+
+    @Override
+    public SplitRead<InternalRow> getOrCreate() {
+        return splitRead.get();
+    }
+}
diff --git a/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffSplitRead.java b/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffSplitRead.java
new file mode 100644
index 0000000000000..7db3a380fba25
--- /dev/null
+++ b/paimon-core/src/main/java/org/apache/paimon/table/source/splitread/IncrementalCompactDiffSplitRead.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.paimon.table.source.splitread;
+
+import org.apache.paimon.data.InternalRow;
+import org.apache.paimon.io.DataFileMeta;
+import org.apache.paimon.operation.MergeFileSplitRead;
+import org.apache.paimon.operation.SplitRead;
+import org.apache.paimon.reader.EmptyRecordReader;
+import org.apache.paimon.reader.RecordReader;
+import org.apache.paimon.table.source.DataSplit;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/** A {@link SplitRead} for streaming the incremental diff produced by compaction. */
+public class IncrementalCompactDiffSplitRead extends IncrementalDiffSplitRead {
+
+    public IncrementalCompactDiffSplitRead(MergeFileSplitRead mergeRead) {
+        super(mergeRead);
+    }
+
+    @Override
+    public RecordReader<InternalRow> createReader(DataSplit split) throws IOException {
+        if (split.beforeFiles().stream().noneMatch(file -> file.level() == 0)) {
+            return new EmptyRecordReader<>();
+        }
+        return super.createReader(filterLevel0Files(split));
+    }
+
+    private DataSplit filterLevel0Files(DataSplit split) {
+        List<DataFileMeta> beforeFiles =
+                split.beforeFiles().stream()
+                        .filter(file -> file.level() > 0)
+                        .collect(Collectors.toList());
+        List<DataFileMeta> afterFiles =
+                split.dataFiles().stream()
+                        .filter(file -> file.level() > 0)
+                        .collect(Collectors.toList());
+        DataSplit.Builder builder =
+                new DataSplit.Builder()
+                        .withSnapshot(split.snapshotId())
+                        .withPartition(split.partition())
+                        .withBucket(split.bucket())
+                        .withBucketPath(split.bucketPath())
+                        .withBeforeFiles(beforeFiles)
+                        .withDataFiles(afterFiles)
+                        .isStreaming(split.isStreaming())
+                        .rawConvertible(split.rawConvertible())
+                        .withCommitKind(split.commitKind());
+
+        if (split.beforeDeletionFiles().isPresent()) {
+            builder.withBeforeDeletionFiles(split.beforeDeletionFiles().get());
+        }
+        if (split.deletionFiles().isPresent()) {
+            builder.withDataDeletionFiles(split.deletionFiles().get());
+        }
+        return builder.build();
+    }
+}
diff --git a/paimon-core/src/test/java/org/apache/paimon/table/TableTestBase.java b/paimon-core/src/test/java/org/apache/paimon/table/TableTestBase.java
index cc8fc98dd4e27..eaaf8ca70bc8c 100644
--- a/paimon-core/src/test/java/org/apache/paimon/table/TableTestBase.java
+++ b/paimon-core/src/test/java/org/apache/paimon/table/TableTestBase.java
@@ -127,10 +127,21 @@ protected void write(Table table, IOManager ioManager, InternalRow... rows) thro
     }

     protected void compact(Table table, BinaryRow partition, int bucket) throws Exception {
+        compact(table, partition, bucket, null, true);
+    }
+
+    protected void compact(
+            Table table,
+            BinaryRow partition,
+            int bucket,
+            IOManager ioManager,
+            boolean fullCompaction)
+            throws Exception {
         BatchWriteBuilder writeBuilder = table.newBatchWriteBuilder();
         try (BatchTableWrite write = writeBuilder.newWrite();
                 BatchTableCommit commit = writeBuilder.newCommit()) {
-            write.compact(partition, bucket, true);
+            write.withIOManager(ioManager);
+            write.compact(partition, bucket, fullCompaction);
             commit.commit(write.prepareCommit());
         }
     }
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/FileStoreLookupFunction.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/FileStoreLookupFunction.java
index 01ebbde201540..4090193de285d 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/FileStoreLookupFunction.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/FileStoreLookupFunction.java
@@ -98,7 +98,9 @@ public class FileStoreLookupFunction implements Serializable, Closeable {

     public FileStoreLookupFunction(
             Table table, int[] projection, int[] joinKeyIndex, @Nullable Predicate predicate) {
-        TableScanUtils.streamingReadingValidate(table);
+        if (!TableScanUtils.supportCompactDiffStreamingReading(table)) {
+            TableScanUtils.streamingReadingValidate(table);
+        }

         this.table = table;
         this.partitionLoader = DynamicPartitionLoader.of(table);
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/LookupStreamingReader.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/LookupStreamingReader.java
index ceb40c1a864fc..fb42a6906c896 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/LookupStreamingReader.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/lookup/LookupStreamingReader.java
@@ -20,6 +20,7 @@

 import org.apache.paimon.CoreOptions;
 import org.apache.paimon.data.InternalRow;
+import org.apache.paimon.flink.utils.TableScanUtils;
 import org.apache.paimon.io.SplitsParallelReadUtil;
 import org.apache.paimon.mergetree.compact.ConcatRecordReader;
 import org.apache.paimon.options.ConfigOption;
@@ -50,6 +51,9 @@
 import java.util.function.IntUnaryOperator;
 import java.util.stream.IntStream;

+import static org.apache.paimon.CoreOptions.SCAN_BOUNDED_WATERMARK;
+import static org.apache.paimon.CoreOptions.STREAM_SCAN_MODE;
+import static org.apache.paimon.CoreOptions.StreamScanMode.COMPACT_DELTA_MONITOR;
 import static org.apache.paimon.flink.FlinkConnectorOptions.LOOKUP_BOOTSTRAP_PARALLELISM;
 import static org.apache.paimon.predicate.PredicateBuilder.transformFieldMapping;

@@ -117,12 +121,20 @@ private Table unsetTimeTravelOptions(Table origin) {
         Map<String, String> newOptions = new HashMap<>(fileStoreTable.options());
         TIME_TRAVEL_OPTIONS.stream().map(ConfigOption::key).forEach(newOptions::remove);

-        CoreOptions.StartupMode startupMode = CoreOptions.fromMap(newOptions).startupMode();
+        CoreOptions coreOptions = CoreOptions.fromMap(newOptions);
+        CoreOptions.StartupMode startupMode = coreOptions.startupMode();
         if (startupMode != CoreOptions.StartupMode.COMPACTED_FULL) {
             startupMode = CoreOptions.StartupMode.LATEST_FULL;
         }
         newOptions.put(CoreOptions.SCAN_MODE.key(), startupMode.toString());

+        if (origin.primaryKeys().size() > 0
+                && coreOptions.changelogProducer() == CoreOptions.ChangelogProducer.NONE
+                && TableScanUtils.supportCompactDiffStreamingReading(origin)) {
+            newOptions.put(STREAM_SCAN_MODE.key(), COMPACT_DELTA_MONITOR.getValue());
+            newOptions.put(SCAN_BOUNDED_WATERMARK.key(), null);
+        }
+
         TableSchema newSchema = fileStoreTable.schema().copy(newOptions);
         return fileStoreTable.copy(newSchema);
     }
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/utils/TableScanUtils.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/utils/TableScanUtils.java
index a5645302f93f3..5a35e811a537f 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/utils/TableScanUtils.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/utils/TableScanUtils.java
@@ -20,12 +20,15 @@

 import org.apache.paimon.CoreOptions;
 import org.apache.paimon.flink.source.FileStoreSourceSplit;
+import org.apache.paimon.options.Options;
 import org.apache.paimon.table.Table;
 import org.apache.paimon.table.source.DataSplit;
 import org.apache.paimon.table.source.TableScan;

 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Optional;
+import java.util.Set;

 /** Utility methods for {@link TableScan}, such as validating. */
 public class TableScanUtils {
@@ -59,4 +62,20 @@ public static Optional<Long> getSnapshotId(FileStoreSourceSplit split) {
         }
         return Optional.empty();
     }
+
+    public static boolean supportCompactDiffStreamingReading(Table table) {
+        CoreOptions options = CoreOptions.fromMap(table.options());
+        Set<CoreOptions.MergeEngine> compactDiffReadingEngine =
+                new HashSet<CoreOptions.MergeEngine>() {
+                    {
+                        add(CoreOptions.MergeEngine.PARTIAL_UPDATE);
+                        add(CoreOptions.MergeEngine.AGGREGATE);
+                    }
+                };
+
+        return options.needLookup()
+                && compactDiffReadingEngine.contains(options.mergeEngine())
+                && !Options.fromMap(options.toMap())
+                        .get(CoreOptions.PARTIAL_UPDATE_REMOVE_RECORD_ON_DELETE);
+    }
 }
diff --git a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/lookup/LookupTableTest.java b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/lookup/LookupTableTest.java
index 14643542e73de..ccf31e9c71b9a 100644
--- a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/lookup/LookupTableTest.java
+++ b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/lookup/LookupTableTest.java
@@ -20,6 +20,7 @@

 import org.apache.paimon.CoreOptions;
 import org.apache.paimon.catalog.Identifier;
+import org.apache.paimon.data.BinaryRow;
 import org.apache.paimon.data.GenericRow;
 import org.apache.paimon.data.InternalRow;
 import org.apache.paimon.data.JoinedRow;
@@ -722,6 +723,105 @@ public void testPKLookupTableRefreshAsync(boolean refreshAsync) throws Exception
         table.close();
     }

+    @Test
+    public void testFullCacheLookupTableWithForceLookup() throws Exception {
+        Options options = new Options();
+        options.set(CoreOptions.MERGE_ENGINE, CoreOptions.MergeEngine.PARTIAL_UPDATE);
+        options.set(CoreOptions.WRITE_ONLY, true);
+        options.set(CoreOptions.FORCE_LOOKUP, true);
+        options.set(CoreOptions.BUCKET, 1);
+        FileStoreTable storeTable = createTable(singletonList("f0"), options);
+        FileStoreTable compactTable =
+                storeTable.copy(Collections.singletonMap(CoreOptions.WRITE_ONLY.key(), "false"));
+        FullCacheLookupTable.Context context =
+                new FullCacheLookupTable.Context(
+                        storeTable,
+                        new int[] {0, 1, 2},
+                        null,
+                        null,
+                        tempDir.toFile(),
+                        singletonList("f0"),
+                        null);
+        table = FullCacheLookupTable.create(context, ThreadLocalRandom.current().nextInt(2) * 10);
+
+        // initialize
+        write(storeTable, ioManager, GenericRow.of(1, 11, 111));
+        compact(compactTable, BinaryRow.EMPTY_ROW, 0, ioManager, true);
+        table.open();
+
+        List<InternalRow> result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, 11, 111);
+
+        // first write
+        write(storeTable, GenericRow.of(1, null, 222));
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, 11, 111); // old value because no compaction has happened yet
+
+        // compact level-0 files only
+        compact(compactTable, BinaryRow.EMPTY_ROW, 0, ioManager, false);
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, 11, 222); // new value visible after compaction
+
+        // second write
+        write(storeTable, GenericRow.of(1, 22, null));
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, 11, 222); // old value
+
+        // full compaction
+        compact(compactTable, BinaryRow.EMPTY_ROW, 0, ioManager, true);
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, 22, 222); // new value
+    }
+
+    @Test
+    public void testPartialLookupTableWithForceLookup() throws Exception {
+        Options options = new Options();
+        options.set(CoreOptions.MERGE_ENGINE, CoreOptions.MergeEngine.PARTIAL_UPDATE);
+        options.set(CoreOptions.CHANGELOG_PRODUCER, CoreOptions.ChangelogProducer.NONE);
+        options.set(CoreOptions.FORCE_LOOKUP, true);
+        options.set(CoreOptions.BUCKET, 1);
+        FileStoreTable dimTable = createTable(singletonList("f0"), options);
+
+        PrimaryKeyPartialLookupTable table =
+                PrimaryKeyPartialLookupTable.createLocalTable(
+                        dimTable,
+                        new int[] {0, 1, 2},
+                        tempDir.toFile(),
+                        ImmutableList.of("f0"),
+                        null);
+        table.open();
+
+        List<InternalRow> result = table.get(row(1, -1));
+        assertThat(result).hasSize(0);
+
+        write(dimTable, ioManager, GenericRow.of(1, -1, 11), GenericRow.of(2, -2, 22));
+        result = table.get(row(1));
+        assertThat(result).hasSize(0);
+
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, -1, 11);
+        result = table.get(row(2));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 2, -2, 22);
+
+        write(dimTable, ioManager, GenericRow.of(1, null, 111));
+        table.refresh();
+        result = table.get(row(1));
+        assertThat(result).hasSize(1);
+        assertRow(result.get(0), 1, -1, 111);
+    }
+
     private FileStoreTable createDimTable() throws Exception {
         FileIO fileIO = LocalFileIO.create();
         org.apache.paimon.fs.Path tablePath =
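
Usage note (not part of the diff): the sketch below restates, outside the patch,
the eligibility check added in TableScanUtils#supportCompactDiffStreamingReading,
i.e. when a primary-key dim table may take the new 'compact-delta-monitor' read
path. `CompactDiffReadEligibility` and `eligibleForCompactDiffRead` are
illustrative names only, not APIs added by this change; the Paimon calls they
use mirror the patch.

    import org.apache.paimon.CoreOptions;
    import org.apache.paimon.options.Options;
    import org.apache.paimon.table.Table;

    import java.util.EnumSet;

    public class CompactDiffReadEligibility {

        // Mirrors TableScanUtils#supportCompactDiffStreamingReading: the delta of
        // COMPACT snapshots is only a usable changelog when lookup compaction is
        // in effect (CoreOptions#needLookup) and the merge engine defers final
        // results to compaction.
        public static boolean eligibleForCompactDiffRead(Table table) {
            CoreOptions options = CoreOptions.fromMap(table.options());
            return options.needLookup()
                    && EnumSet.of(
                                    CoreOptions.MergeEngine.PARTIAL_UPDATE,
                                    CoreOptions.MergeEngine.AGGREGATE)
                            .contains(options.mergeEngine())
                    // 'partial-update.remove-record-on-delete' opts out of this path.
                    && !Options.fromMap(options.toMap())
                            .get(CoreOptions.PARTIAL_UPDATE_REMOVE_RECORD_ON_DELETE);
        }
    }

When this predicate holds and 'changelog-producer' is 'none', LookupStreamingReader
switches the scan to 'compact-delta-monitor', so lookup refreshes consume the delta
of COMPACT snapshots (with level-0 before-files filtered out by
IncrementalCompactDiffSplitRead) instead of requiring a generated changelog.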