Commit: format
wg1026688210 committed Feb 28, 2024
1 parent 79a9661 commit 635bae2
Showing 4 changed files with 7 additions and 8 deletions.
@@ -24,7 +24,7 @@
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.utils.ObjectSerializer;

-/** Serializer for Identifier. */
+/** Serializer for {@link Identifier}. */
public class IdentifierSerializer extends ObjectSerializer<Identifier> {

public IdentifierSerializer() {
@@ -198,7 +198,7 @@ private void buildForCombinedMode() {
excludingPattern,
tableOptions.get(CoreOptions.CONTINUOUS_DISCOVERY_INTERVAL).toMillis());

-// multi bucket table
+// multi-bucket table: a table with multiple buckets per partition, such as a fixed-bucket or dynamic-bucket table
DataStream<RowData> multiBucketTableSource =
partition(
sourceBuilder
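For context on the distinction this comment draws, here is a minimal, self-contained Java sketch; it is not Paimon's actual classification code, and the enum, method names, and option semantics below are stated as assumptions. It shows how a table's bucket option and primary key would place it in the multi-bucket branch (fixed or dynamic bucket) or the unaware-bucket branch:

/** Hypothetical sketch of the bucket-type distinction referenced in the comment above. */
public class BucketKindSketch {

    enum BucketKind {
        /** Fixed or dynamic bucket: a partition can hold several buckets. */
        MULTI_BUCKET,
        /** Append-only table without bucket awareness. */
        UNAWARE_BUCKET
    }

    /**
     * Classifies a table from its options, assuming the usual Paimon convention that
     * 'bucket' = -1 without a primary key means an unaware-bucket (append-only) table.
     */
    static BucketKind classify(int bucketOption, boolean hasPrimaryKey) {
        if (bucketOption == -1 && !hasPrimaryKey) {
            return BucketKind.UNAWARE_BUCKET;
        }
        // Fixed bucket (bucket > 0) or dynamic bucket (bucket = -1 with a primary key).
        return BucketKind.MULTI_BUCKET;
    }

    public static void main(String[] args) {
        System.out.println(classify(4, true));   // MULTI_BUCKET (fixed bucket)
        System.out.println(classify(-1, true));  // MULTI_BUCKET (dynamic bucket)
        System.out.println(classify(-1, false)); // UNAWARE_BUCKET (append-only table)
    }
}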
@@ -36,14 +36,14 @@
import static org.apache.paimon.flink.utils.MultiTablesCompactorUtil.shouldCompactTable;

/**
-* This class is responsible for implementing the scanning logic of different buckets during table
+* This class is responsible for implementing the scanning logic for tables with different bucket types during
* compaction.
*
* @param <T> the result of scanning file :
* <ol>
-* <li>the splits {@link Split} for the table with multi buckets, such as dynamic or fixed
+* <li>{@link Split} for the table with multiple buckets, such as dynamic or fixed
* bucket table.
-* <li>the compaction task {@link AppendOnlyCompactionTask} for the table witch fixed single
+* <li>{@link AppendOnlyCompactionTask} for the table with a fixed single
* bucket, such as an unaware-bucket table.
* </ol>
*/
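To illustrate the type parameter <T> described in this javadoc, here is a hypothetical, self-contained sketch of a scan logic parameterized on its result type; the interface and classes below are illustrative stand-ins, not the classes touched by this commit:

import java.util.List;

/** Hypothetical sketch: a scan logic whose result type depends on the bucket kind. */
interface TableScanLogicSketch<T> {
    /** Returns the scan results, possibly empty if this table kind has nothing to compact. */
    List<T> scan();

    String bucketType();
}

/** Multi-bucket tables (fixed or dynamic bucket) would yield split-like results. */
class MultiBucketScanSketch implements TableScanLogicSketch<String> {
    @Override
    public List<String> scan() {
        return List.of("split-1", "split-2"); // stand-ins for Split objects
    }

    @Override
    public String bucketType() {
        return "multi bucket";
    }
}

/** Unaware-bucket (append-only) tables would yield compaction-task-like results. */
class UnawareBucketScanSketch implements TableScanLogicSketch<Integer> {
    @Override
    public List<Integer> scan() {
        return List.of(1, 2); // stand-ins for AppendOnlyCompactionTask objects
    }

    @Override
    public String bucketType() {
        return "unaware bucket";
    }
}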
@@ -43,10 +43,9 @@ public void scan(SourceFunction.SourceContext<T> ctx) throws Exception {
return;
}
if (isEmpty) {
-// Currently, in the combine mode, scan tasks for two different bucket tables are
+// Currently, in the combined mode, two scan tasks for tables of the two different bucket kinds (multi bucket & unaware bucket) are
// running concurrently.
-// Given that there is only one type of bucket, only one task will encounter data,
-// therefore an exception should not be thrown here.
+// There can be a situation where only one of the tasks has compaction work to do, so no exception should be thrown here.
LOGGER.info(
"No file were collected for the table of {}", tableScanLogic.bucketType());
}
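The behavior described by the rewritten comment can be sketched in isolation as follows (hypothetical names, reusing the TableScanLogicSketch interface from the sketch above; the real method emits records through a Flink SourceContext rather than returning a list): the scan task that finds no files logs and returns instead of throwing, because the concurrent task for the other bucket kind may be the one doing the compaction.

import java.util.List;
import java.util.logging.Logger;

/** Hypothetical sketch of the empty-scan handling in combined mode. */
class CombinedModeScanSketch<T> {
    private static final Logger LOGGER = Logger.getLogger(CombinedModeScanSketch.class.getName());

    private final TableScanLogicSketch<T> tableScanLogic;

    CombinedModeScanSketch(TableScanLogicSketch<T> tableScanLogic) {
        this.tableScanLogic = tableScanLogic;
    }

    List<T> scan() {
        List<T> results = tableScanLogic.scan();
        if (results.isEmpty()) {
            // Only one of the two concurrent tasks may have compaction work to do,
            // so an empty result is expected here and must not raise an exception.
            LOGGER.info("No files were collected for the " + tableScanLogic.bucketType() + " table");
        }
        return results;
    }
}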
