Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[core] Refactor MetastoreClient methods to simplify catalog #4726

Merged
merged 4 commits into from
Dec 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -417,7 +417,7 @@ protected Table getDataOrFormatTable(Identifier identifier) throws TableNotExist
lockFactory().orElse(null),
lockContext().orElse(null),
identifier),
metastoreClientFactory(identifier, tableMeta.schema).orElse(null)));
metastoreClientFactory(identifier).orElse(null)));
CoreOptions options = table.coreOptions();
if (options.type() == TableType.OBJECT_TABLE) {
String objectLocation = options.objectLocation();
Expand Down Expand Up @@ -485,8 +485,7 @@ protected abstract TableSchema getDataTableSchema(Identifier identifier)
throws TableNotExistException;

/** Get metastore client factory for the table specified by {@code identifier}. */
public Optional<MetastoreClient.Factory> metastoreClientFactory(
Identifier identifier, TableSchema schema) {
public Optional<MetastoreClient.Factory> metastoreClientFactory(Identifier identifier) {
return Optional.empty();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import org.apache.paimon.manifest.ManifestEntry;
import org.apache.paimon.table.sink.CommitCallback;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.utils.InternalRowPartitionComputer;

import org.apache.paimon.shade.guava30.com.google.common.cache.Cache;
import org.apache.paimon.shade.guava30.com.google.common.cache.CacheBuilder;
Expand All @@ -48,9 +49,12 @@ public class AddPartitionCommitCallback implements CommitCallback {
.build();

private final MetastoreClient client;
private final InternalRowPartitionComputer partitionComputer;

/**
 * Creates a callback that registers newly committed partitions in the metastore.
 *
 * @param client metastore client bound to the table being written
 * @param partitionComputer converts committed {@code BinaryRow} partitions into
 *     partition spec maps before they are sent to the metastore
 */
public AddPartitionCommitCallback(
        MetastoreClient client, InternalRowPartitionComputer partitionComputer) {
    this.client = client;
    this.partitionComputer = partitionComputer;
}

@Override
Expand Down Expand Up @@ -81,7 +85,10 @@ private void addPartitions(Set<BinaryRow> partitions) {
}
}
if (!newPartitions.isEmpty()) {
client.addPartitions(newPartitions);
client.addPartitions(
newPartitions.stream()
.map(partitionComputer::generatePartValues)
.collect(Collectors.toList()));
newPartitions.forEach(partition -> cache.put(partition, true));
}
} catch (Exception e) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ public void notifyDeletion(String tagName) {
LinkedHashMap<String, String> partitionSpec = new LinkedHashMap<>();
partitionSpec.put(partitionField, tagName);
try {
client.deletePartition(partitionSpec);
client.dropPartition(partitionSpec);
} catch (Exception e) {
throw new RuntimeException(e);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,45 +18,28 @@

package org.apache.paimon.metastore;

import org.apache.paimon.data.BinaryRow;

import java.io.Serializable;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/**
* A metastore client related to a table. All methods of this interface operate on the same specific
* table.
*/
public interface MetastoreClient extends AutoCloseable {

void addPartition(BinaryRow partition) throws Exception;

default void addPartitions(List<BinaryRow> partitions) throws Exception {
for (BinaryRow partition : partitions) {
addPartition(partition);
}
}
void addPartition(LinkedHashMap<String, String> partition) throws Exception;

void addPartition(LinkedHashMap<String, String> partitionSpec) throws Exception;
void addPartitions(List<LinkedHashMap<String, String>> partitions) throws Exception;

default void addPartitionsSpec(List<LinkedHashMap<String, String>> partitionSpecsList)
throws Exception {
for (LinkedHashMap<String, String> partitionSpecs : partitionSpecsList) {
addPartition(partitionSpecs);
}
}
void dropPartition(LinkedHashMap<String, String> partition) throws Exception;

void deletePartition(LinkedHashMap<String, String> partitionSpec) throws Exception;
void dropPartitions(List<LinkedHashMap<String, String>> partitions) throws Exception;

void markDone(LinkedHashMap<String, String> partitionSpec) throws Exception;
void markPartitionDone(LinkedHashMap<String, String> partition) throws Exception;

default void alterPartition(
LinkedHashMap<String, String> partitionSpec,
Map<String, String> parameters,
long modifyTime,
boolean ignoreIfNotExist)
LinkedHashMap<String, String> partition, PartitionStats partitionStats)
throws Exception {
throw new UnsupportedOperationException();
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.paimon.metastore;

/** Read-only statistics describing a single table partition. */
public interface PartitionStats {

    /** Number of data files currently in the partition. */
    long numFiles();

    /** Total size in bytes of all data files in the partition. */
    long totalSize();

    /** Total number of rows in the partition. */
    long numRows();

    /** Last update time of the partition, in epoch milliseconds. */
    long lastUpdateTimeMillis();

    /**
     * Creates an immutable {@link PartitionStats} holding the given values.
     *
     * @param numFiles number of data files
     * @param totalSize total size in bytes
     * @param numRows total row count
     * @param lastUpdateTimeMillis last update time in epoch milliseconds
     * @return a simple value-holder implementation of this interface
     */
    static PartitionStats create(
            long numFiles, long totalSize, long numRows, long lastUpdateTimeMillis) {
        return new PartitionStats() {

            @Override
            public long numFiles() {
                return numFiles;
            }

            @Override
            public long totalSize() {
                return totalSize;
            }

            @Override
            public long numRows() {
                return numRows;
            }

            @Override
            public long lastUpdateTimeMillis() {
                return lastUpdateTimeMillis;
            }

            @Override
            public String toString() {
                // Concatenation renders longs exactly as String.format's %s would.
                return "numFiles: " + numFiles
                        + ", totalSize: " + totalSize
                        + ", numRows: " + numRows
                        + ", lastUpdateTimeMillis: " + lastUpdateTimeMillis;
            }
        };
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -167,15 +167,13 @@ private List<Map<String, String>> doExpire(
}

private void deleteMetastorePartitions(List<Map<String, String>> partitions) {
if (metastoreClient != null) {
partitions.forEach(
partition -> {
try {
metastoreClient.deletePartition(new LinkedHashMap<>(partition));
} catch (Exception e) {
throw new RuntimeException(e);
}
});
if (metastoreClient != null && partitions.size() > 0) {
try {
metastoreClient.dropPartitions(
partitions.stream().map(LinkedHashMap::new).collect(Collectors.toList()));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ public MarkPartitionDoneEventAction(MetastoreClient metastoreClient) {
/**
 * Marks the given partition as done in the metastore.
 *
 * @param partition partition path string; the partition spec is extracted from it via
 *     {@code extractPartitionSpecFromPath}
 * @throws Exception if the metastore client fails to mark the partition
 */
public void markDone(String partition) throws Exception {
    // Parse the spec out of the path, then forward it to the metastore client.
    metastoreClient.markPartitionDone(extractPartitionSpecFromPath(new Path(partition)));
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@
import org.apache.paimon.table.source.snapshot.TimeTravelUtil;
import org.apache.paimon.tag.TagPreview;
import org.apache.paimon.utils.BranchManager;
import org.apache.paimon.utils.InternalRowPartitionComputer;
import org.apache.paimon.utils.Preconditions;
import org.apache.paimon.utils.SegmentsCache;
import org.apache.paimon.utils.SimpleFileReader;
Expand Down Expand Up @@ -469,7 +470,15 @@ protected List<CommitCallback> createCommitCallbacks(String commitUser) {
if (options.partitionedTableInMetastore()
&& metastoreClientFactory != null
&& !tableSchema.partitionKeys().isEmpty()) {
callbacks.add(new AddPartitionCommitCallback(metastoreClientFactory.create()));
InternalRowPartitionComputer partitionComputer =
new InternalRowPartitionComputer(
options.partitionDefaultName(),
tableSchema.logicalPartitionType(),
tableSchema.partitionKeys().toArray(new String[0]),
options.legacyPartitionName());
callbacks.add(
new AddPartitionCommitCallback(
metastoreClientFactory.create(), partitionComputer));
}

TagPreview tagPreview = TagPreview.create(options);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,12 @@
import org.apache.paimon.io.CompactIncrement;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.io.DataIncrement;
import org.apache.paimon.metastore.MetastoreClient;
import org.apache.paimon.options.Options;
import org.apache.paimon.schema.Schema;
import org.apache.paimon.schema.SchemaManager;
import org.apache.paimon.schema.TableSchema;
import org.apache.paimon.table.CatalogEnvironment;
import org.apache.paimon.table.FileStoreTable;
import org.apache.paimon.table.FileStoreTableFactory;
import org.apache.paimon.table.sink.CommitMessage;
Expand All @@ -54,16 +58,19 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.apache.paimon.CoreOptions.METASTORE_PARTITIONED_TABLE;
import static org.apache.paimon.CoreOptions.PARTITION_EXPIRATION_CHECK_INTERVAL;
import static org.apache.paimon.CoreOptions.PARTITION_EXPIRATION_TIME;
import static org.apache.paimon.CoreOptions.PARTITION_TIMESTAMP_FORMATTER;
import static org.apache.paimon.CoreOptions.PATH;
import static org.apache.paimon.CoreOptions.WRITE_ONLY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
Expand All @@ -75,12 +82,54 @@ public class PartitionExpireTest {

private Path path;
private FileStoreTable table;
private List<LinkedHashMap<String, String>> deletedPartitions;

@BeforeEach
public void beforeEach() {
path = new Path(tempDir.toUri());
}

/**
 * Rebuilds {@code table} from the latest schema stored at {@code path}, wiring in a
 * stub metastore client whose drop calls are recorded into {@code deletedPartitions}
 * so tests can assert which partitions were removed.
 */
private void newTable() {
    // Fresh recording sink for every table instance.
    deletedPartitions = new ArrayList<>();

    // Stub client: add/close are no-ops, drops are recorded, markPartitionDone is unsupported.
    MetastoreClient.Factory factory =
            () ->
                    new MetastoreClient() {

                        @Override
                        public void addPartition(LinkedHashMap<String, String> partition) {}

                        @Override
                        public void addPartitions(
                                List<LinkedHashMap<String, String>> partitions) {}

                        @Override
                        public void dropPartition(LinkedHashMap<String, String> partition) {
                            deletedPartitions.add(partition);
                        }

                        @Override
                        public void dropPartitions(
                                List<LinkedHashMap<String, String>> partitions) {
                            deletedPartitions.addAll(partitions);
                        }

                        @Override
                        public void markPartitionDone(
                                LinkedHashMap<String, String> partition) {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public void close() {}
                    };

    LocalFileIO io = LocalFileIO.create();
    Options conf = new Options();
    conf.set(PATH, path.toString());
    // Load the latest schema from the configured table path and (default) branch.
    TableSchema schema =
            new SchemaManager(io, CoreOptions.path(conf), CoreOptions.branch(conf.toMap()))
                    .latest()
                    .get();
    CatalogEnvironment environment =
            new CatalogEnvironment(null, null, Lock.emptyFactory(), factory);
    table = FileStoreTableFactory.create(io, path, schema, environment);
}

@Test
public void testNonPartitionedTable() {
SchemaManager schemaManager = new SchemaManager(LocalFileIO.create(), path);
Expand Down Expand Up @@ -108,7 +157,7 @@ public void testIllegalPartition() throws Exception {
emptyList(),
Collections.emptyMap(),
""));
table = FileStoreTableFactory.create(LocalFileIO.create(), path);
newTable();
write("20230101", "11");
write("abcd", "12");
write("20230101", "12");
Expand All @@ -129,9 +178,9 @@ public void test() throws Exception {
RowType.of(VarCharType.STRING_TYPE, VarCharType.STRING_TYPE).getFields(),
singletonList("f0"),
emptyList(),
Collections.emptyMap(),
Collections.singletonMap(METASTORE_PARTITIONED_TABLE.key(), "true"),
""));
table = FileStoreTableFactory.create(LocalFileIO.create(), path);
newTable();

write("20230101", "11");
write("20230101", "12");
Expand All @@ -156,6 +205,12 @@ public void test() throws Exception {

expire.expire(date(8), Long.MAX_VALUE);
assertThat(read()).isEmpty();

assertThat(deletedPartitions)
.containsExactlyInAnyOrder(
new LinkedHashMap<>(Collections.singletonMap("f0", "20230101")),
new LinkedHashMap<>(Collections.singletonMap("f0", "20230103")),
new LinkedHashMap<>(Collections.singletonMap("f0", "20230105")));
}

@Test
Expand All @@ -169,7 +224,7 @@ public void testFilterCommittedAfterExpiring() throws Exception {
Collections.emptyMap(),
""));

table = FileStoreTableFactory.create(LocalFileIO.create(), path);
newTable();
// disable compaction and snapshot expiration
table = table.copy(Collections.singletonMap(WRITE_ONLY.key(), "true"));
String commitUser = UUID.randomUUID().toString();
Expand Down Expand Up @@ -243,7 +298,7 @@ public void testDeleteExpiredPartition() throws Exception {
emptyList(),
Collections.emptyMap(),
""));
table = FileStoreTableFactory.create(LocalFileIO.create(), path);
newTable();
table = newExpireTable();

List<CommitMessage> commitMessages = write("20230101", "11");
Expand Down
Loading
Loading