diff --git a/paimon-core/src/main/java/org/apache/paimon/io/IdentifierSerializer.java b/paimon-core/src/main/java/org/apache/paimon/io/IdentifierSerializer.java
index 5c71c872276c8..c5f298876e055 100644
--- a/paimon-core/src/main/java/org/apache/paimon/io/IdentifierSerializer.java
+++ b/paimon-core/src/main/java/org/apache/paimon/io/IdentifierSerializer.java
@@ -24,7 +24,7 @@
 import org.apache.paimon.data.InternalRow;
 import org.apache.paimon.utils.ObjectSerializer;
 
-/** Serializer for Identifier. */
+/** Serializer for {@link Identifier}. */
 public class IdentifierSerializer extends ObjectSerializer<Identifier> {
 
     public IdentifierSerializer() {
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactDatabaseAction.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactDatabaseAction.java
index 31e74464c72cc..e0f4c834fe02d 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactDatabaseAction.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactDatabaseAction.java
@@ -198,7 +198,7 @@ private void buildForCombinedMode() {
                         excludingPattern,
                         tableOptions.get(CoreOptions.CONTINUOUS_DISCOVERY_INTERVAL).toMillis());
 
-        // multi bucket table
+        // multi-bucket table: a table with multiple buckets per partition, such as a fixed or dynamic bucket table
         DataStream<RowData> multiBucketTableSource =
                 partition(
                         sourceBuilder
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/AbstractBucketScanLogic.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/AbstractBucketScanLogic.java
index 28e374ec72869..03b3b719934a5 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/AbstractBucketScanLogic.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/AbstractBucketScanLogic.java
@@ -36,14 +36,14 @@
 import static org.apache.paimon.flink.utils.MultiTablesCompactorUtil.shouldCompactTable;
 
 /**
- * This class is responsible for implementing the scanning logic of different buckets during table
+ * This class is responsible for implementing the scanning logic for tables with different bucket types during
  * compaction.
  *
  * @param <T> the result of scanning file :
  *     <ol>
- *       <li>the splits {@link Split} for the table with multi buckets, such as dynamic or fixed
+ *       <li>{@link Split} for the table with multiple buckets, such as a dynamic or fixed
  *           bucket table.
- *       <li>the compaction task {@link AppendOnlyCompactionTask} for the table witch fixed single
- *           bucket ,such as unaware bucket table.
+ *       <li>{@link AppendOnlyCompactionTask} for the table with a fixed single
+ *           bucket, such as an unaware bucket table.
  *     </ol>
  */
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/BatchFileScanner.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/BatchFileScanner.java
index e64f18f6a1ed1..dd74171f3aee9 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/BatchFileScanner.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/compact/BatchFileScanner.java
@@ -43,10 +43,9 @@ public void scan(SourceFunction.SourceContext<T> ctx) throws Exception {
                 return;
             }
             if (isEmpty) {
-                // Currently, in the combine mode, scan tasks for two different bucket tables are
+                // Currently, in combined mode, two scan tasks (one for multi-bucket tables, one for unaware-bucket tables) are
                 // running concurrently.
-                // Given that there is only one type of bucket, only one task will encounter data,
-                // therefore an exception should not be thrown here.
+                // It is normal for only one of them to find files to compact, so no exception should be thrown here.
                 LOGGER.info(
                         "No file were collected for the table of {}", tableScanLogic.bucketType());
             }
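
For context on the multiBucketTableSource comment above: records of a multi-bucket table must be routed so that each (table, partition, bucket) triple lands on exactly one parallel compaction subtask, which is what the partition(...) call arranges. Below is a dependency-free sketch of such a channel computation; the class and method names are hypothetical, not Paimon's actual ChannelComputer API.

import java.util.Objects;

// Hypothetical sketch of bucket-aware channel assignment: records of a
// multi-bucket table are routed so that one parallel compaction subtask
// owns a given (table, partition, bucket) triple.
final class BucketChannelComputer {
    private final int numChannels;

    BucketChannelComputer(int numChannels) {
        this.numChannels = numChannels;
    }

    int channel(String table, String partition, int bucket) {
        // A stable hash over the triple keeps a bucket pinned to one subtask
        // across records, so its files are always compacted together.
        int hash = Objects.hash(table, partition, bucket);
        return Math.floorMod(hash, numChannels);
    }
}

With this shape, new BucketChannelComputer(4).channel("db.t", "dt=2024-01-01", 3) always returns the same subtask index for the same bucket.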
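The AbstractBucketScanLogic javadoc above is generic over the scan result: multi-bucket tables yield Split instances, while unaware-bucket (single fixed bucket) tables yield AppendOnlyCompactionTask instances. Here is a minimal, self-contained sketch of that shape, with stub types standing in for Paimon's real classes.

import java.util.ArrayList;
import java.util.List;

// Placeholder result types standing in for Paimon's Split and
// AppendOnlyCompactionTask; the real classes carry file metadata.
class SplitStub {}
class CompactionTaskStub {}

// Hypothetical sketch: the scan logic is generic over the unit of work it
// emits, which is why the javadoc above documents <T> with two cases.
abstract class ScanLogicSketch<T> {
    // Returns the units of work found in this scan round (possibly empty).
    abstract List<T> scan();
}

// Multi-bucket tables (fixed or dynamic bucket) emit splits...
class MultiBucketScanSketch extends ScanLogicSketch<SplitStub> {
    @Override
    List<SplitStub> scan() {
        List<SplitStub> splits = new ArrayList<>();
        // ... collect one or more splits per bucket here ...
        return splits;
    }
}

// ...while unaware-bucket tables emit whole-file compaction tasks.
class UnawareBucketScanSketch extends ScanLogicSketch<CompactionTaskStub> {
    @Override
    List<CompactionTaskStub> scan() {
        List<CompactionTaskStub> tasks = new ArrayList<>();
        // ... collect compaction tasks for the single bucket here ...
        return tasks;
    }
}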
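The BatchFileScanner change documents why an empty scan is tolerated: in combined mode one scanner runs per bucket type, and a database may simply contain no tables of one type, so the scanner that finds nothing logs and returns instead of failing. A small sketch of that decision follows, using the JDK's System.Logger rather than Paimon's actual logging setup; the class and method names are illustrative.

import java.lang.System.Logger;
import java.lang.System.Logger.Level;
import java.util.List;

// Hypothetical sketch of the combined-mode batch scan decision: two
// scanners (multi-bucket and unaware-bucket) run concurrently, and it is
// normal for one of them to find nothing.
class BatchScanSketch {
    private static final Logger LOGGER = System.getLogger(BatchScanSketch.class.getName());

    void emitOrSkip(List<?> collected, String bucketType) {
        if (collected.isEmpty()) {
            // The sibling scanner for the other bucket type may hold all the
            // work, so an empty result here is logged, not thrown.
            LOGGER.log(Level.INFO, "No files were collected for tables of bucket type {0}", bucketType);
            return;
        }
        // ... emit the collected splits or compaction tasks downstream ...
    }
}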