Skip to content

Commit

Permalink
[core] Add TableFormatBenchmark to test format only
Browse files Browse the repository at this point in the history
  • Loading branch information
JingsongLi authored and zhu3pang committed Mar 29, 2024
1 parent c928967 commit 096c433
Show file tree
Hide file tree
Showing 3 changed files with 135 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,6 @@
import java.util.Collections;
import java.util.List;

import static java.util.Collections.singletonList;

/** Base class for table benchmark. */
public class TableBenchmark {

Expand All @@ -53,6 +51,11 @@ public class TableBenchmark {
private final RandomDataGenerator random = new RandomDataGenerator();

/**
 * Creates a test table with the default single primary key column {@code "k"}.
 *
 * @param tableOptions table-level options applied to the created table
 * @param tableName name of the table to create
 * @return the created {@link Table}
 * @throws Exception if catalog or table creation fails
 */
protected Table createTable(Options tableOptions, String tableName) throws Exception {
    List<String> defaultPrimaryKeys = Collections.singletonList("k");
    return createTable(tableOptions, tableName, defaultPrimaryKeys);
}

protected Table createTable(Options tableOptions, String tableName, List<String> primaryKeys)
throws Exception {
Options catalogOptions = new Options();
catalogOptions.set(CatalogOptions.WAREHOUSE, tempFile.toUri().toString());
Catalog catalog = CatalogFactory.createCatalog(CatalogContext.create(catalogOptions));
Expand All @@ -66,12 +69,7 @@ protected Table createTable(Options tableOptions, String tableName) throws Excep
}
tableOptions.set(CoreOptions.SNAPSHOT_NUM_RETAINED_MAX, 10);
Schema schema =
new Schema(
fields,
Collections.emptyList(),
singletonList("k"),
tableOptions.toMap(),
"");
new Schema(fields, Collections.emptyList(), primaryKeys, tableOptions.toMap(), "");
Identifier identifier = Identifier.create(database, tableName);
catalog.createTable(identifier, schema, false);
return catalog.getTable(identifier);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.paimon.benchmark;

import org.apache.paimon.data.InternalRow;
import org.apache.paimon.options.Options;
import org.apache.paimon.reader.RecordReader;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.table.sink.StreamTableCommit;
import org.apache.paimon.table.sink.StreamTableWrite;
import org.apache.paimon.table.sink.StreamWriteBuilder;
import org.apache.paimon.table.source.Split;

import org.junit.jupiter.api.Test;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/** Benchmark for table read. */
/**
 * Benchmark for table read that measures the file format only: the table is created without
 * primary keys (an append-only table), so no merge/compaction logic is involved in the read path.
 */
public class TableFormatBenchmark extends TableBenchmark {

    /** Number of rows written once in {@link #prepareData()} and scanned per read iteration. */
    private final int rowCount = 10000000;

    @Test
    public void testRead() throws Exception {
        innerTest(prepareData());
        /*
         * OpenJDK 64-Bit Server VM 1.8.0_292-b10 on Mac OS X 10.16
         * Apple M1 Pro
         * read: Best/Avg Time(ms) Row Rate(K/s) Per Row(ns) Relative
         * ------------------------------------------------------------------------------------------------
         * OPERATORTEST_read_read-orc 11314 / 11366 2651.6 377.1 1.0X
         */
    }

    /**
     * Runs the read benchmark against the given table: each benchmark iteration plans the splits
     * and fully consumes every split {@code readTime} times.
     *
     * @param table the prepared table to scan
     */
    private void innerTest(Table table) {
        int readTime = 3;
        Benchmark benchmark =
                new Benchmark("read", readTime * rowCount)
                        .setNumWarmupIters(1)
                        .setOutputPerIteration(true);

        benchmark.addCase(
                "read",
                5,
                () -> {
                    for (int i = 0; i < readTime; i++) {
                        List<Split> splits = table.newReadBuilder().newScan().plan().splits();
                        AtomicLong readCount = new AtomicLong(0);
                        try {
                            for (Split split : splits) {
                                // try-with-resources: close each reader so file handles are
                                // released (the reader was previously leaked for every split)
                                try (RecordReader<InternalRow> reader =
                                        table.newReadBuilder().newRead().createReader(split)) {
                                    reader.forEachRemaining(row -> readCount.incrementAndGet());
                                }
                            }
                            System.out.printf("Finish read %d rows.\n", readCount.get());
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                });
        benchmark.run();
    }

    /**
     * Writes {@link #rowCount} random rows into a fresh append-only table (no primary keys) and
     * commits them as a single snapshot.
     *
     * @return the populated table, ready to be read
     * @throws Exception if table creation, writing, or committing fails
     */
    private Table prepareData() throws Exception {
        Table table = createTable(new Options(), "table", Collections.emptyList());
        StreamWriteBuilder writeBuilder = table.newStreamWriteBuilder();
        // try-with-resources guarantees both write and commit are closed even on failure
        // (previously commit was never closed and write leaked on the exception path)
        try (StreamTableWrite write = writeBuilder.newWrite();
                StreamTableCommit commit = writeBuilder.newCommit()) {
            for (int i = 0; i < rowCount; i++) {
                write.write(newRandomRow());
            }
            List<CommitMessage> commitMessages = write.prepareCommit(true, 1);
            commit.commit(1, commitMessages);
        }
        return table;
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@

import org.junit.jupiter.api.Test;

import java.util.LinkedHashMap;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
Expand All @@ -43,20 +43,37 @@ public class TableReadBenchmark extends TableBenchmark {
private final int rowCount = 1000000;

@Test
public void testRead() throws Exception {
Map<String, Table> tables = new LinkedHashMap<>();
tables.put("orc", prepareData(orc(), "orc"));
tables.put("parquet", prepareData(parquet(), "parquet"));
tables.put("avro", prepareData(avro(), "avro"));

innerTest(tables);
public void testOrcRead() throws Exception {
    // Benchmark reading a table stored in the ORC format.
    Table orcTable = prepareData(orc(), "orc");
    innerTest(Collections.singletonMap("orc", orcTable));
    /*
     * OpenJDK 64-Bit Server VM 1.8.0_292-b10 on Mac OS X 10.16
     * Apple M1 Pro
     * read: Best/Avg Time(ms) Row Rate(K/s) Per Row(ns) Relative
     * ------------------------------------------------------------------------------------------------
     * OPERATORTEST_read_read-orc 1046 / 1295 2867.3 348.8 1.0X
     */
}

@Test
public void testParquetRead() throws Exception {
    // Benchmark reading a table stored in the Parquet format.
    Table parquetTable = prepareData(parquet(), "parquet");
    innerTest(Collections.singletonMap("parquet", parquetTable));
    /*
     * OpenJDK 64-Bit Server VM 1.8.0_292-b10 on Mac OS X 10.16
     * Apple M1 Pro
     * read: Best/Avg Time(ms) Row Rate(K/s) Per Row(ns) Relative
     * ------------------------------------------------------------------------------------------------
     * OPERATORTEST_read_read-parquet 3076 / 5295 975.4 1025.2 0.3X
     */
}

@Test
public void testAvroRead() throws Exception {
    // Benchmark reading a table stored in the Avro format.
    Table avroTable = prepareData(avro(), "avro");
    innerTest(Collections.singletonMap("avro", avroTable));
    /*
     * OpenJDK 64-Bit Server VM 1.8.0_292-b10 on Mac OS X 10.16
     * Apple M1 Pro
     * read: Best/Avg Time(ms) Row Rate(K/s) Per Row(ns) Relative
     * ------------------------------------------------------------------------------------------------
     * OPERATORTEST_read_read-avro 4156 / 4362 721.8 1385.5 0.3X
     */
}
Expand Down

0 comments on commit 096c433

Please sign in to comment.