From 5cdbe9998c93fffec0da714f90eb59a735a264d4 Mon Sep 17 00:00:00 2001 From: Yukang-Lian Date: Tue, 10 Dec 2024 15:25:51 +0800 Subject: [PATCH] 1 --- ...yunOssSdk.groovy => aliyun_oss_sdk.groovy} | 16 +- .../plugins/cloud_show_data_plugin.groovy | 220 ++++++++++++++ .../suites/show_data/ddl/lineitem_mow.sql | 3 +- .../show_data/test_show_mow_data.groovy | 42 +-- .../test_cloud_follower_show_data.groovy | 127 ++++++++ .../test_cloud_mtmv_show_data.groovy | 209 +++++++++++++ ...hange_add_and_drop_column_show_data.groovy | 196 ++++++++++++ ...change_add_and_drop_index_show_data.groovy | 196 ++++++++++++ ...ema_change_reorder_column_show_data.groovy | 166 +++++++++++ ...t_cloud_delete_table_rows_show_data.groovy | 196 ++++++++++++ ...rop_and_recover_partition_show_data.groovy | 226 ++++++++++++++ .../test_cloud_drop_table_show_data.groovy | 149 ++++++++++ ...runcate_and_recover_table_show_data.groovy | 149 ++++++++++ ..._cloud_disable_compaction_show_data.groovy | 93 ++++++ ...t_cloud_inverted_index_v1_show_data.groovy | 95 ++++++ ...t_cloud_inverted_index_v2_show_data.groovy | 96 ++++++ .../test_cloud_lz4_show_data.groovy | 93 ++++++ .../test_cloud_zstd_show_data.groovy | 93 ++++++ .../test_cloud_agg_show_data.groovy | 95 ++++++ .../test_cloud_dup_show_data.groovy | 93 ++++++ .../test_cloud_mor_show_data.groovy | 93 ++++++ ..._cloud_mow_partial_update_show_data.groovy | 281 ++++++++++++++++++ .../test_cloud_mow_show_data.groovy | 92 ++++++ 23 files changed, 2987 insertions(+), 32 deletions(-) rename regression-test/plugins/{aliyunOssSdk.groovy => aliyun_oss_sdk.groovy} (98%) create mode 100644 regression-test/plugins/cloud_show_data_plugin.groovy create mode 100644 regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy create mode 100644 
regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy create mode 100644 regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy diff --git a/regression-test/plugins/aliyunOssSdk.groovy b/regression-test/plugins/aliyun_oss_sdk.groovy similarity index 98% rename from regression-test/plugins/aliyunOssSdk.groovy rename to regression-test/plugins/aliyun_oss_sdk.groovy index cbc132a088dffb..efd6efa585b397 100644 --- a/regression-test/plugins/aliyunOssSdk.groovy +++ b/regression-test/plugins/aliyun_oss_sdk.groovy @@ -1,5 +1,4 @@ - // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information @@ -49,7 +48,7 @@ Suite.metaClass.listOssObjectWithPrefix = { OSS client, String bucketName, Strin String nextMarker = null; final int maxKeys = 500; List sums = null; - + if (!client.doesBucketExist(bucketName)) { logger.info("no bucket named ${bucketName} in ${endpoint}") return @@ -61,14 +60,14 @@ Suite.metaClass.listOssObjectWithPrefix = { OSS client, String bucketName, Strin do { objectListing = client.listObjects(new ListObjectsRequest(bucketName). withPrefix(prefix).withMarker(nextMarker).withMaxKeys(maxKeys)); - + sums = objectListing.getObjectSummaries(); for (OSSObjectSummary s : sums) { logger.info("\t" + s.getKey()); } - + nextMarker = objectListing.getNextMarker(); - + } while (objectListing.isTruncated()); } catch (OSSException oe) { logger.error("Caught an OSSException, which means your request made it to OSS, " @@ -107,7 +106,7 @@ Suite.metaClass.calculateFolderLength = { OSS client, String bucketName, String for (OSSObjectSummary s : sums) { size += s.getSize(); } - } while (objectListing.isTruncated()); + } while (objectListing.isTruncated()); return size; } @@ -143,7 +142,7 @@ Suite.metaClass.getOssAllDirSizeWithPrefix = { OSS client, String bucketName, St logger.info(s.getKey() + " : " + (s.getSize() / (1024 * 1024 * 1024)) + "GB"); } } while (objectListing.isTruncated()); - + } catch (OSSException oe) { logger.error("Caught an OSSException, which means your request made it to OSS, " + "but was rejected with an error response for some reason."); @@ -164,6 +163,3 @@ Suite.metaClass.getOssAllDirSizeWithPrefix = { OSS client, String bucketName, St logger.info("Done!") } } - - - diff --git a/regression-test/plugins/cloud_show_data_plugin.groovy b/regression-test/plugins/cloud_show_data_plugin.groovy new file mode 100644 index 00000000000000..43dc6fd38345cd --- /dev/null +++ b/regression-test/plugins/cloud_show_data_plugin.groovy @@ -0,0 +1,220 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +import groovy.json.JsonOutput +import org.apache.doris.regression.suite.Suite +import org.codehaus.groovy.runtime.IOGroovyMethods + + Suite.metaClass.repeate_stream_load_same_data = { String tableName, int loadTimes, String filePath-> + for (int i = 0; i < loadTimes; i++) { + streamLoad { + table tableName + set 'column_separator', '|' + set 'compress_type', 'GZ' + file """${getS3Url()}/${filePath}""" + time 10000 // limit inflight 10s + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + assertEquals(json.NumberTotalRows, json.NumberLoadedRows) + assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0) + } + } + } + } + + Suite.metaClass.stream_load_partial_update_data = { String tableName-> + for (int i = 0; i < 20; i++) { + int start = i * 10 + 1 + int end = (i + 1) * 10 + def elements = (start..end).collect { "a$it" } + String columns = "id," + elements.join(',') + streamLoad { + table tableName + set 'column_separator', '|' + set 'compress_type', 'GZ' + set 'columns', columns + set 'partial_columns', 'true' + file """${getS3Url()}/regression/show_data/fullData.1.part${i+1}.gz""" + time 10000 // limit inflight 10s + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + assertEquals(json.NumberTotalRows, json.NumberLoadedRows) + assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0) + } + } + } + } + + Suite.metaClass.get_tablets_from_table = { String table -> + def res = sql_return_maparray """show tablets from ${table}""" + return res + } + + Suite.metaClass.show_tablet_compaction = { HashMap tablet -> + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET ") + sb.append(tablet["CompactionStatus"]) + String command = sb.toString() + logger.info(command) + def process = command.execute() + def code = process.waitFor() + def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + return parseJson(out.trim()) + } + + Suite.metaClass.trigger_tablet_compaction = { HashMap tablet, String compact_type -> + //support trigger base/cumulative/full compaction + def tabletStatusBeforeCompaction = show_tablet_compaction(tablet) + + String tabletInBe = tablet + String showCompactionStatus = tablet["CompactionStatus"] + String triggerCompactionUrl = showCompactionStatus.split("show")[0] + "run?tablet_id=" + tablet["TabletId"] + "&compact_type=" + compact_type + StringBuilder sb = new StringBuilder(); + sb.append("curl -X POST ") + sb.append(triggerCompactionUrl) + String command = sb.toString() + logger.info(command) + def process = command.execute() + def code = process.waitFor() + def err = 
IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() + def outJson = parseJson(out) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + // code == 0 with status "success" means the compaction request was accepted and must be polled; + // any other result may simply mean there was no suitable compaction to run + if ( code == 0 && outJson.status.toLowerCase() == "success" ){ + def compactionStatus = "RUNNING" + def tabletStatusAfterCompaction = null + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + tabletStatusAfterCompaction = show_tablet_compaction(tablet) + logger.info("tabletStatusAfterCompaction class: " + tabletStatusAfterCompaction.class) + logger.info("tabletStatusAfterCompaction: " + tabletStatusAfterCompaction.toString()) + if (tabletStatusAfterCompaction.rowsets.size() < tabletStatusBeforeCompaction.rowsets.size()){ + compactionStatus = 'FINISHED' + } + Thread.sleep(60 * 1000) + } while (timeoutTimestamp > System.currentTimeMillis() && (compactionStatus != 'FINISHED')) + + if (compactionStatus != "FINISHED") { + logger.info("compaction did not finish in time or failed") + return false + } + } + } + + Suite.metaClass.trigger_compaction = { List> tablets -> + for(def tablet: tablets) { + trigger_tablet_compaction(tablet, "cumulative") + trigger_tablet_compaction(tablet, "base") + trigger_tablet_compaction(tablet, "full") + } + } + + Suite.metaClass.caculate_table_data_size_in_backend_storage = { List> tablets -> + def storageType = context.config.otherConfigs.get("storageProvider") + Double storageSize = 0 + + List tabletIds = [] + for(def tablet: tablets) { + tabletIds.add(tablet["TabletId"]) + } + + if (storageType.toLowerCase() == "oss") { + //cbs means cluster backend storage + def ak = context.config.otherConfigs.get("cbsS3Ak") + def sk = context.config.otherConfigs.get("cbsS3Sk") + def endpoint = context.config.otherConfigs.get("cbsS3Endpoint") + def bucketName = context.config.otherConfigs.get("cbsS3Bucket") + def storagePrefix = context.config.otherConfigs.get("cbsS3Prefix") + + def client = initOssClient(ak, sk, endpoint) + for(String tabletId: tabletIds) { + storageSize += calculateFolderLength(client, bucketName, storagePrefix + "/data/" + tabletId) + } + shutDownOssClient(client) + } + + if (storageType.toLowerCase() == "hdfs") { + // TODO: size calculation for HDFS backend storage is not implemented yet; these configs are read but unused + def fsName = context.config.otherConfigs.get("cbsFsName") + def isKerberosFs = context.config.otherConfigs.get("cbsFsKerberos") + def fsUser = context.config.otherConfigs.get("cbsFsUser") + def storagePrefix = context.config.otherConfigs.get("cbsFsPrefix") + } + + return storageSize + } + + Suite.metaClass.translate_different_unit_to_MB = { String size, String unitField -> + // normalize a size value with its unit field to MB + Double sizeMb = 0.0 + if (unitField == "KB") { + sizeMb = Double.parseDouble(size) / 1024 + } else if (unitField == "MB") { + sizeMb = Double.parseDouble(size) + } else if (unitField == "GB") { + sizeMb = Double.parseDouble(size) * 1024 + } else if (unitField == "TB") { + sizeMb = Double.parseDouble(size) * 1024 * 1024 + } + return sizeMb + } + + Suite.metaClass.show_table_data_size_through_mysql = { String table -> + def mysqlShowDataSize = 0L + def res = sql_return_maparray " show data from ${table}" + def tableSizeInfo = res[0] + def fields = tableSizeInfo["Size"].split(" ") + if (fields.length == 2 ){ + def sizeField = fields[0] + def unitField = fields[1] + mysqlShowDataSize = translate_different_unit_to_MB(sizeField, unitField) + } + return mysqlShowDataSize + } + + 
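+ // Usage sketch for the two helpers above (illustrative only; the "2.5 GB" cell is a made-up example of a `show data` Size value, not captured output): + // def fields = "2.5 GB".split(" ") // ["2.5", "GB"] + // def sizeMb = translate_different_unit_to_MB(fields[0], fields[1]) + // assert sizeMb == 2.5 * 1024 // 2560.0 MB + // show_table_data_size_through_mysql applies exactly this parsing to the "Size" column returned by `show data from <table>`.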
Suite.metaClass.caculate_table_data_size_through_api = { List> tablets -> + Double apiCaculateSize = 0 + for (HashMap tablet in tablets) { + def tabletStatus = show_tablet_compaction(tablet) + + for(String rowset: tabletStatus.rowsets){ + def fields = rowset.split(" ") + if (fields.length == 7) { + def sizeField = fields[-2] // the second-to-last field (size) + def unitField = fields[-1] // the last field (unit) + // normalize to MB + apiCaculateSize += translate_different_unit_to_MB(sizeField, unitField) + } + } + } + + return apiCaculateSize + } +//http://qa-build.oss-cn-beijing.aliyuncs.com/regression/show_data/fullData.1.part1.gz diff --git a/regression-test/suites/show_data/ddl/lineitem_mow.sql b/regression-test/suites/show_data/ddl/lineitem_mow.sql index 1d29f44ac8fb6a..88f56787bb1235 100644 --- a/regression-test/suites/show_data/ddl/lineitem_mow.sql +++ b/regression-test/suites/show_data/ddl/lineitem_mow.sql @@ -14,7 +14,8 @@ CREATE TABLE IF NOT EXISTS lineitem_mow ( L_RECEIPTDATE DATE NOT NULL, L_SHIPINSTRUCT CHAR(25) NOT NULL, L_SHIPMODE CHAR(10) NOT NULL, - L_COMMENT VARCHAR(44) NOT NULL + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR NULL ) UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 diff --git a/regression-test/suites/show_data/test_show_mow_data.groovy b/regression-test/suites/show_data/test_show_mow_data.groovy index c94e5786d7efae..383577ad8045df 100644 --- a/regression-test/suites/show_data/test_show_mow_data.groovy +++ b/regression-test/suites/show_data/test_show_mow_data.groovy @@ -34,7 +34,7 @@ suite("test_mow_show_data_in_cloud","p2") { table tableName set 'column_separator', '|' set 'compress_type', 'GZ' - file """${getS3Url()}/regression/tpch/sf1/lineitem.csv.split00.gz""" + file """${getS3Url()}/regression/tpch/sf0.1/lineitem.tbl.gz""" time 10000 // limit inflight 10s check { result, exception, startTime, endTime -> if (exception != null) { @@ -61,10 +61,10 @@ suite("test_mow_show_data_in_cloud","p2") { sb.append(tablet["CompactionStatus"]) String command = sb.toString() logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() + def process = command.execute() + def code = process.waitFor() + def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) return parseJson(out.trim()) @@ -82,10 +82,10 @@ suite("test_mow_show_data_in_cloud","p2") { sb.append(triggerCompactionUrl) String command = sb.toString() logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() + def process = command.execute() + def code = process.waitFor() + def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() def outJson = parseJson(out) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) // if code = 0 means compaction happend, need to check @@ -121,7 +121,7 @@ suite("test_mow_show_data_in_cloud","p2") { } def caculate_table_data_size_in_backend_storage = { List> tablets -> - storageType = context.config.otherConfigs.get("storageProvider") + def storageType = 
context.config.otherConfigs.get("storageProvider") Double storageSize = 0 List tabletIds = [] @@ -131,13 +131,13 @@ suite("test_mow_show_data_in_cloud","p2") { if (storageType.toLowerCase() == "oss") { //cbs means cluster backend storage - ak = context.config.otherConfigs.get("cbsS3Ak") - sk = context.config.otherConfigs.get("cbsS3Sk") - endpoint = context.config.otherConfigs.get("cbsS3Endpoint") - bucketName = context.config.otherConfigs.get("cbsS3Bucket") - storagePrefix = context.config.otherConfigs.get("cbsS3Prefix") + def ak = context.config.otherConfigs.get("cbsS3Ak") + def sk = context.config.otherConfigs.get("cbsS3Sk") + def endpoint = context.config.otherConfigs.get("cbsS3Endpoint") + def bucketName = context.config.otherConfigs.get("cbsS3Bucket") + def storagePrefix = context.config.otherConfigs.get("cbsS3Prefix") - client = initOssClient(ak, sk, endpoint) + def client = initOssClient(ak, sk, endpoint) for(String tabletId: tabletIds) { storageSize += calculateFolderLength(client, bucketName, storagePrefix + "/data/" + tabletId) } @@ -145,10 +145,10 @@ suite("test_mow_show_data_in_cloud","p2") { } if (storageType.toLowerCase() == "hdfs") { - fsName = context.config.otherConfigs.get("cbsFsName") - isKerberosFs = context.config.otherConfigs.get("cbsFsKerberos") - fsUser = context.config.otherConfigs.get("cbsFsUser") - storagePrefix = context.config.otherConfigs.get("cbsFsPrefix") + def fsName = context.config.otherConfigs.get("cbsFsName") + def isKerberosFs = context.config.otherConfigs.get("cbsFsKerberos") + def fsUser = context.config.otherConfigs.get("cbsFsUser") + def storagePrefix = context.config.otherConfigs.get("cbsFsPrefix") } return storageSize @@ -201,7 +201,7 @@ suite("test_mow_show_data_in_cloud","p2") { } def main = { - tableName="lineitem_mow" + def tableName="lineitem_mow" sql "DROP TABLE IF EXISTS ${tableName};" sql new File("""${context.file.parent}/ddl/${tableName}.sql""").text sql new File("""${context.file.parent}/ddl/lineitem_delete.sql""").text.replaceAll("\\\$\\{table\\}", tableName) diff --git a/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy new file mode 100644 index 00000000000000..e127dbcb5b6b41 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy @@ -0,0 +1,127 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris.
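+// The suite below re-runs the size checks through a follower FE. A minimal sketch of the connection logic, assuming the SHOW FRONTENDS column layout Host=1, IsMaster=8, Alive=11 (the layout varies across Doris versions, so these indexes are an assumption): +// def frontends = sql """show frontends;""" +// for (def fe : frontends) { +// if (fe[8] == "false" && fe[11] == "true") { // a live non-master FE +// def followerJdbcUrl = context.config.jdbcUrl.replaceAll(/\/\/[0-9.]+:/, "//${fe[1]}:") +// connect('root', '', followerJdbcUrl) { sql "select 1" } // runs on the follower +// } +// }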
+import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_follower_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + + def check = {String tableName -> + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + + def result = sql """show frontends;""" + logger.info("result:" + result) + for (int i = 0; i < result.size(); i++) { + if (result[i][8] == "false" && result[i][11] == "true") { + // a live non-master FE; assumes the SHOW FRONTENDS layout Host=1, IsMaster=8, Alive=11 + def switch_ip = result[i][1] + def tokens = context.config.jdbcUrl.split('/') + def url = tokens[0] + "//" + tokens[2] + "/" + "information_schema" + "?" 
+ def new_jdbc_url = url.replaceAll(/\/\/[0-9.]+:/, "//${switch_ip}:") + logger.info("new_jdbc_url: " + new_jdbc_url) + + connect('root', '', new_jdbc_url) { + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize for the newly recorded follower reading + assertEquals(sizeRecords["mysqlSize"][-1], sizeRecords["apiSize"][-1]) + assertEquals(sizeRecords["mysqlSize"][-1], sizeRecords["cbsSize"][-1]) + // the size seen through the follower should match the earlier master reading + assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["mysqlSize"][-1]) + } + } + } + } + + def main = { + def tableName="test_cloud_follower_show_data" + create_table(tableName) + check(tableName) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy new file mode 100644 index 00000000000000..cc4fd289296028 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_mtmv_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_mv = { String tableName -> + def mvName = tableName + "_mv" + sql """DROP MATERIALIZED VIEW IF EXISTS ${mvName}""" + sql""" + CREATE MATERIALIZED VIEW ${mvName} + AS + select L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER from ${tableName}; + """ + Thread.sleep(10000) + } + + def create_mtmv = { String tableName -> + def mtmvName = tableName + "_mtmv" + sql """DROP MATERIALIZED VIEW IF EXISTS ${mtmvName}""" + sql""" + CREATE MATERIALIZED VIEW ${mtmvName} + BUILD DEFERRED REFRESH COMPLETE ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + select L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER from ${tableName}; + """ + sql """refresh materialized view ${mtmvName} auto;""" + def db = "regression_test_show_data_p2_test_table_modification" + Thread.sleep(30000) + } + + def check = {String tableName, int op -> + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + if (op == 1){ + create_mv(tableName) + tablets = get_tablets_from_table(tableName) + + // trigger compaction
+ trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3]) + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3]) + } + + if (op == 2){ + create_mtmv(tableName) + tableName = tableName + "_mtmv" + tablets = get_tablets_from_table(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3]) + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3]) + } + } + + def main = { + def tableName="test_cloud_mv_show_data" + create_table(tableName) + check(tableName, 1) + tableName="test_cloud_mtmv_show_data" + create_table(tableName) + check(tableName, 2) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy new file mode 100644 index 00000000000000..da9772e0a91f0e --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_index_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR, + index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted, + index index_SHIPMODE (L_SHIPMODE) using inverted, + index index_COMMENT (L_COMMENT) using inverted + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def schema_change_add_column = { String tableName -> + sql """ + ALTER TABLE ${tableName} add column l_test int after L_COMMENT; + """ + + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + } + + def schema_change_drop_column = { String tableName -> + sql """ + ALTER TABLE ${tableName} drop column L_COMMENT; + """ + + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + } + + def check = {String tableName -> + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, 
record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + schema_change_add_column(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + + schema_change_drop_column(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3]) + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3]) + } + + def main = { + def tableName="test_cloud_schema_change_add_and_drop_column_show_data" + create_table(tableName) + check(tableName) + tableName="test_cloud_schema_change_add_and_drop_column_show_data_index" + create_index_table(tableName) + check(tableName) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy new file mode 100644 index 00000000000000..eae225d94c8f37 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_index_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR, + index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted, + index index_SHIPMODE (L_SHIPMODE) using inverted, + index index_COMMENT (L_COMMENT) using inverted + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def schema_change_add_index = { String tableName -> + sql """ + ALTER TABLE ${tableName} add index index1 (L_LINESTATUS) using inverted; + """ + + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + } + + def schema_change_drop_index = { String tableName -> + sql """ + ALTER TABLE ${tableName} drop index index1; + """ + + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + } + + def check = {String tableName -> + List tablets = 
get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + schema_change_add_index(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + + schema_change_drop_index(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3]) + assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3]) + } + + def main = { + def tableName="test_cloud_schema_change_add_and_drop_index_show_data" + create_table(tableName) + check(tableName) + tableName="test_cloud_schema_change_add_and_drop_index_show_data_index" + create_index_table(tableName) + check(tableName) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy new file mode 100644 index 00000000000000..4ad5815fc20290 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy @@ -0,0 +1,166 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_schema_change_reorder_column_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_index_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR, + index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted, + index index_SHIPMODE (L_SHIPMODE) using inverted, + index index_COMMENT (L_COMMENT) using inverted + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def schema_change_reorder_column= { String tableName -> + sql """ + ALTER TABLE ${tableName} modify column L_SHIPMODE CHAR(10) NOT NULL after L_COMMENT; + """ + + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE column WHERE TableName='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + } + + def check = {String tableName -> + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // 
stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + schema_change_reorder_column(tableName) + + // trigger compaction + trigger_compaction(tablets) + + // then sleep 1 min so the FE can finish reporting + sleep(60 * 1000) + + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + } + + def main = { + def tableName="test_cloud_schema_change_reorder_column_show_data" + create_table(tableName) + check(tableName) + tableName="test_cloud_schema_change_reorder_column_show_data_index" + create_index_table(tableName) + check(tableName) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy new file mode 100644 index 00000000000000..792cc1d2b4da98 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy @@ -0,0 +1,196 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// These cases are copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // load the same data 10 times and expect the data size not to rise +suite("test_cloud_delete_table_rows_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_normal_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_dynamic_partition_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + PARTITION BY RANGE(L_ORDERKEY) + ( + PARTITION p1 VALUES LESS THAN (100000), + PARTITION p2 VALUES LESS THAN (200000), + PARTITION p3 VALUES LESS THAN (300000), + PARTITION p4 VALUES LESS THAN (400000), + PARTITION p5 VALUES LESS THAN (500000), + PARTITION other VALUES LESS THAN (MAXVALUE) + ) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_auto_partition_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY, + L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, L_RETURNFLAG, L_LINESTATUS, + L_SHIPDATE) + AUTO PARTITION BY RANGE (date_trunc(`L_SHIPDATE`, 'year')) + ( + ) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def check = {String tableName -> + List tablets = 
get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load the same data i time(s), then record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        sql """delete from ${tableName} where L_ORDERKEY >= 0;"""
+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+
+        // trigger compaction so the reported sizes converge
+        trigger_compaction(tablets)
+
+        // then sleep 1 min so the FE finishes reporting tablet stats
+        sleep(60 * 1000)
+        sql "select count(*) from ${tableName}"
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
+    }
+
+    def main = {
+        def tableName = "test_cloud_delete_table_rows_show_data"
+        create_normal_table(tableName)
+        check(tableName)
+        tableName = "test_cloud_delete_table_rows_dynamic_partition_show_data"
+        create_dynamic_partition_table(tableName)
+        check(tableName)
+        tableName = "test_cloud_delete_table_rows_auto_partition_show_data"
+        create_auto_partition_table(tableName)
+        check(tableName)
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy
new file mode 100644
index 00000000000000..672c0f78e394a0
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy
@@ -0,0 +1,226 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_drop_and_recover_partition_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_normal_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_dynamic_partition_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + PARTITION BY RANGE(L_ORDERKEY) + ( + PARTITION p1 VALUES LESS THAN (100000), + PARTITION p2 VALUES LESS THAN (200000), + PARTITION p3 VALUES LESS THAN (300000), + PARTITION p4 VALUES LESS THAN (400000), + PARTITION p5 VALUES LESS THAN (500000), + PARTITION other VALUES LESS THAN (MAXVALUE) + ) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def create_auto_partition_table = { String tableName -> + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + 
L_COMMENT VARCHAR(44) NOT NULL,
+            L_NULL VARCHAR
+        )
+        UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY,
+            L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, L_RETURNFLAG, L_LINESTATUS,
+            L_SHIPDATE)
+        AUTO PARTITION BY RANGE (date_trunc(`L_SHIPDATE`, 'year'))
+        (
+        )
+        DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+        """
+    }
+
+    def check = {String tableName, int op ->
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load the same data i time(s), then record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if (op == 1){
+            sql """alter table ${tableName} drop partition p1;"""
+        } else if(op == 2){
+            sql """alter table ${tableName} drop partition p19920101000000;"""
+        }
+
+        // after dropping a partition the tablet set changes, so fetch the new tablets
+        tablets = get_tablets_from_table(tableName)
+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+
+        // trigger compaction so the reported sizes converge
+        trigger_compaction(tablets)
+
+        // then sleep 1 min so the FE finishes reporting tablet stats
+        sleep(60 * 1000)
+        sql "select count(*) from ${tableName}"
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
+        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
+
+        if (op == 1){
+            sql """recover partition p1 from ${tableName};"""
+        } else if(op == 2){
+            sql """recover partition p19920101000000 from ${tableName};"""
+        }
+
+        // after recovering the partition the tablet set changes again, so fetch the new tablets
+        tablets = get_tablets_from_table(tableName)
+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+
+        // trigger compaction so the reported sizes converge
+        trigger_compaction(tablets)
+
+        // then sleep 1 min so the FE finishes reporting tablet stats
+        sleep(60 * 1000)
+        sql "select count(*) from ${tableName}"
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3])
+        
assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3])
+    }
+
+    def main = {
+        def tableName = "test_cloud_drop_and_recover_partition_show_data"
+        create_normal_table(tableName)
+        check(tableName, 0)
+        tableName = "test_cloud_drop_and_recover_dynamic_partition_show_data"
+        create_dynamic_partition_table(tableName)
+        check(tableName, 1)
+        tableName = "test_cloud_drop_and_recover_auto_partition_show_data"
+        create_auto_partition_table(tableName)
+        check(tableName, 2)
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
new file mode 100644
index 00000000000000..4e406296b7120e
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
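The auto-partition branch above drops and recovers a partition literally named p19920101000000. That name follows the convention of `p` plus the truncated timestamp; a small sketch of the derivation (the `yyyyMMddHHmmss` formatting pattern is an assumption inferred from the literal used in the suite):

```groovy
// Deriving the auto-partition name for rows whose L_SHIPDATE falls in 1992,
// given AUTO PARTITION BY RANGE (date_trunc(`L_SHIPDATE`, 'year')).
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter

def truncated = LocalDateTime.of(1992, 1, 1, 0, 0)  // date_trunc(..., 'year')
def name = "p" + truncated.format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss"))
assert name == "p19920101000000"
```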
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times and expect the data size not to rise
+suite("test_cloud_drop_and_recover_table_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def create_normal_table = { String tableName ->
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+        CREATE TABLE IF NOT EXISTS ${tableName}(
+            L_ORDERKEY INTEGER NOT NULL,
+            L_PARTKEY INTEGER NOT NULL,
+            L_SUPPKEY INTEGER NOT NULL,
+            L_LINENUMBER INTEGER NOT NULL,
+            L_QUANTITY DECIMAL(15,2) NOT NULL,
+            L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+            L_DISCOUNT DECIMAL(15,2) NOT NULL,
+            L_TAX DECIMAL(15,2) NOT NULL,
+            L_RETURNFLAG CHAR(1) NOT NULL,
+            L_LINESTATUS CHAR(1) NOT NULL,
+            L_SHIPDATE DATE NOT NULL,
+            L_COMMITDATE DATE NOT NULL,
+            L_RECEIPTDATE DATE NOT NULL,
+            L_SHIPINSTRUCT CHAR(25) NOT NULL,
+            L_SHIPMODE CHAR(10) NOT NULL,
+            L_COMMENT VARCHAR(44) NOT NULL,
+            L_NULL VARCHAR
+        )
+        UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+        DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+        """
+    }
+
+    def check = {String tableName, int op ->
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load the same data i time(s), then record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+
+        if(op == 1){
+            sql """drop table ${tableName}"""
+
+            sleep(60 * 1000)
+
+            sql """recover table ${tableName}"""
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            // expect mysqlSize == apiSize == storageSize, and the recovered size to match the pre-drop size
+            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
+            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
+            assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][2])
+        }
+
+        if(op == 2){
+            sql """drop table ${tableName} force"""
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], 0) + + } + } + + def main = { + def tableName = "test_cloud_drop_and_recover_table_show_data" + create_normal_table(tableName) + check(tableName, 1) + + tableName = "test_cloud_drop_and_recover_table_force_show_data" + create_normal_table(tableName) + check(tableName, 2) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy new file mode 100644 index 00000000000000..c06a402ce94a4a --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. 
+import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_truncate_and_recover_table_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def create_normal_table = { String tableName -> + sql "drop TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + } + + def check = {String tableName, int op -> + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + + if(op == 1){ + + sql """truncate table ${tableName}""" + + sleep(60 * 1000) + + sql """recover table ${tableName}""" + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][2]) + } + + if(op == 2){ + + sql """truncate table ${tableName} force""" + 
sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2]) + assertEquals(sizeRecords["mysqlSize"][2], 0) + + } + } + + def main = { + def tableName = "test_cloud_truncate_and_recover_table_show_data" + create_normal_table(tableName) + check(tableName, 1) + + tableName = "test_cloud_truncate_and_recover_table_force_show_data" + create_normal_table(tableName) + check(tableName, 2) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy new file mode 100644 index 00000000000000..7a773342074de7 --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. 
+import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_disable_compaction_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def main = { + def tableName="test_cloud_disable_compaction_show_data" + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "disable_auto_compaction" = "true", + "replication_num" = "1" + ) + """ + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy new file mode 100644 index 00000000000000..dd8d917e1e8e1d --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_inverted_index_v1_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def main = { + def tableName="test_cloud_inverted_index_v1_show_data" + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR, + index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted, + index index_SHIPMODE (L_SHIPMODE) using inverted, + index index_COMMENT (L_COMMENT) using inverted + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy 
b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy new file mode 100644 index 00000000000000..6670e2067da03f --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_inverted_index_v2_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def main = { + def tableName="test_cloud_inverted_index_v2_show_data" + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR, + index index_SHIPINSTRUCT (L_SHIPINSTRUCT) using inverted, + index index_SHIPMODE (L_SHIPMODE) using inverted, + index index_COMMENT (L_COMMENT) using inverted + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "inverted_index_storage_format" = "V2", + "replication_num" = "1" + ) + """ + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: 
${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy
new file mode 100644
index 00000000000000..de5464759cc1eb
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
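The lz4 suite below and its zstd twin are identical apart from the `compression` property. Once both have run, a speculative follow-up comparison could look like the sketch below (helper from the plugin; the ratio expectation is an assumption, nothing like it is asserted by the suites):

```groovy
// Speculative post-hoc comparison between the two codec suites.
def lz4Size  = show_table_data_size_through_mysql("test_cloud_lz4_show_data")
def zstdSize = show_table_data_size_through_mysql("test_cloud_zstd_show_data")
logger.info("zstd/lz4 size ratio: ${zstdSize / (double) lz4Size}")  // typically <= 1.0
```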
+import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_lz4_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def main = { + def tableName="test_cloud_lz4_show_data" + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName}( + L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) NOT NULL, + L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, + L_DISCOUNT DECIMAL(15,2) NOT NULL, + L_TAX DECIMAL(15,2) NOT NULL, + L_RETURNFLAG CHAR(1) NOT NULL, + L_LINESTATUS CHAR(1) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR(25) NOT NULL, + L_SHIPMODE CHAR(10) NOT NULL, + L_COMMENT VARCHAR(44) NOT NULL, + L_NULL VARCHAR + ) + UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "compression" = "lz4", + "replication_num" = "1" + ) + """ + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect load 1 times == load 10 times + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1]) + assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1]) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy new file mode 100644 index 00000000000000..ad37f9ac95e03a --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times and expect the data size not to rise
+suite("test_cloud_zstd_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName="test_cloud_zstd_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+        CREATE TABLE IF NOT EXISTS ${tableName}(
+            L_ORDERKEY INTEGER NOT NULL,
+            L_PARTKEY INTEGER NOT NULL,
+            L_SUPPKEY INTEGER NOT NULL,
+            L_LINENUMBER INTEGER NOT NULL,
+            L_QUANTITY DECIMAL(15,2) NOT NULL,
+            L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+            L_DISCOUNT DECIMAL(15,2) NOT NULL,
+            L_TAX DECIMAL(15,2) NOT NULL,
+            L_RETURNFLAG CHAR(1) NOT NULL,
+            L_LINESTATUS CHAR(1) NOT NULL,
+            L_SHIPDATE DATE NOT NULL,
+            L_COMMITDATE DATE NOT NULL,
+            L_RECEIPTDATE DATE NOT NULL,
+            L_SHIPINSTRUCT CHAR(25) NOT NULL,
+            L_SHIPMODE CHAR(10) NOT NULL,
+            L_COMMENT VARCHAR(44) NOT NULL,
+            L_NULL VARCHAR
+        )
+        UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+        DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+        PROPERTIES (
+            "compression" = "zstd",
+            "replication_num" = "1"
+        )
+        """
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load the same data i time(s), then record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy
new file mode 100644
index 00000000000000..e995845f26ac5d
--- /dev/null
+++ 
b/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. +import org.codehaus.groovy.runtime.IOGroovyMethods + + // loading one data 10 times, expect data size not rising +suite("test_cloud_agg_show_data","p2") { + //cloud-mode + if (!isCloudMode()) { + logger.info("not cloud mode, not run") + return + } + + def main = { + def tableName="test_cloud_agg_show_data" + sql "DROP TABLE IF EXISTS ${tableName};" + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( L_ORDERKEY INTEGER NOT NULL, + L_PARTKEY INTEGER NOT NULL, + L_SUPPKEY INTEGER NOT NULL, + L_LINENUMBER INTEGER NOT NULL, + L_QUANTITY DECIMAL(15,2) SUM, + L_EXTENDEDPRICE DECIMAL(15,2) SUM, + L_DISCOUNT DECIMAL(15,2) SUM, + L_TAX DECIMAL(15,2) SUM, + L_RETURNFLAG CHAR(1) REPLACE, + L_LINESTATUS CHAR(1) REPLACE, + L_SHIPDATE DATE MAX, + L_COMMITDATE DATE MAX, + L_RECEIPTDATE DATE MAX, + L_SHIPINSTRUCT CHAR(25) REPLACE, + L_SHIPMODE CHAR(10) REPLACE, + L_COMMENT VARCHAR(44) REPLACE, + L_NULL VARCHAR REPLACE + ) + AGGREGATE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) + DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + List tablets = get_tablets_from_table(tableName) + def loadTimes = [1, 10] + Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]] + for (int i in loadTimes){ + // stream load 1 time, record each size + repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz") + def rows = sql_return_maparray "select count(*) as count from ${tableName};" + logger.info("table ${tableName} has ${rows[0]["count"]} rows") + // 加一下触发compaction的机制 + trigger_compaction(tablets) + + // 然后 sleep 1min, 等fe汇报完 + sleep(60 * 1000) + sql "select count(*) from ${tableName}" + + sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets)) + sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets)) + sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][1]) + 
assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["cbsSize"][1])
+        // expect: size after one load <= size after 10 loads <= 10 x the size after one load on the agg table
+        assertTrue(10*sizeRecords["mysqlSize"][0]>=sizeRecords["mysqlSize"][1])
+        assertTrue(sizeRecords["mysqlSize"][1]>=sizeRecords["mysqlSize"][0])
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy
new file mode 100644
index 00000000000000..ad3109dd945b49
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times; on a duplicate table the size should scale linearly
+suite("test_cloud_dup_show_data","p2") {
+    //cloud-mode
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName="test_cloud_dup_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+        CREATE TABLE IF NOT EXISTS ${tableName}(
+            L_ORDERKEY INTEGER NOT NULL,
+            L_PARTKEY INTEGER NOT NULL,
+            L_SUPPKEY INTEGER NOT NULL,
+            L_LINENUMBER INTEGER NOT NULL,
+            L_QUANTITY DECIMAL(15,2) NOT NULL,
+            L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL,
+            L_DISCOUNT DECIMAL(15,2) NOT NULL,
+            L_TAX DECIMAL(15,2) NOT NULL,
+            L_RETURNFLAG CHAR(1) NOT NULL,
+            L_LINESTATUS CHAR(1) NOT NULL,
+            L_SHIPDATE DATE NOT NULL,
+            L_COMMITDATE DATE NOT NULL,
+            L_RECEIPTDATE DATE NOT NULL,
+            L_SHIPINSTRUCT CHAR(25) NOT NULL,
+            L_SHIPMODE CHAR(10) NOT NULL,
+            L_COMMENT VARCHAR(44) NOT NULL,
+            L_NULL VARCHAR
+        )
+        DUPLICATE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+        DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+        """
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes){
+            // stream load the same data i time(s), then record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so the reported sizes converge
+            trigger_compaction(tablets)
+
+            // then sleep 1 min so the FE finishes reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName)) + sleep(60 * 1000) + logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}") + } + + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0]) + assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0]) + // expect mysqlSize == apiSize == storageSize + assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][1]) + assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["cbsSize"][1]) + // expect load 10 times on dup table = 10 * load 1 times on dup table + assertTrue(10*sizeRecords["mysqlSize"][0]==sizeRecords["mysqlSize"][1]) + } + + main() +} diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy new file mode 100644 index 00000000000000..e159ebcecf942c --- /dev/null +++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// The cases is copied from https://github.com/trinodb/trino/tree/master +// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds +// and modified by Doris. 
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times and expect the data size not to rise
+suite("test_cloud_mor_show_data","p2") {
+    // run in cloud mode only
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName = "test_cloud_mor_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+                L_ORDERKEY    INTEGER NOT NULL,
+                L_PARTKEY     INTEGER NOT NULL,
+                L_SUPPKEY     INTEGER NOT NULL,
+                L_LINENUMBER  INTEGER NOT NULL,
+                L_QUANTITY    DECIMAL(15,2) NOT NULL,
+                L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+                L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+                L_TAX         DECIMAL(15,2) NOT NULL,
+                L_RETURNFLAG  CHAR(1) NOT NULL,
+                L_LINESTATUS  CHAR(1) NOT NULL,
+                L_SHIPDATE    DATE NOT NULL,
+                L_COMMITDATE  DATE NOT NULL,
+                L_RECEIPTDATE DATE NOT NULL,
+                L_SHIPINSTRUCT CHAR(25) NOT NULL,
+                L_SHIPMODE     CHAR(10) NOT NULL,
+                L_COMMENT      VARCHAR(44) NOT NULL,
+                L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+                "enable_unique_key_merge_on_write" = "false",
+                "replication_num" = "1"
+            )
+        """
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes) {
+            // stream load the same data i times and record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so redundant rowsets are merged before measuring
+            trigger_compaction(tablets)
+
+            // then sleep 1 min and wait for the FE to finish reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} rounds of stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy
new file mode 100644
index 00000000000000..e32342775fb06f
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy
@@ -0,0 +1,281 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times and expect the data size not to rise
+suite("test_cloud_mow_partial_update_show_data","p2") {
+    // run in cloud mode only
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName = "test_cloud_mow_partial_update_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """ CREATE TABLE ${tableName} (
+            id varchar(50) NOT NULL COMMENT "userid",
+            a1 varchar(100) DEFAULT 'anone' COMMENT "",
+            a2 varchar(100) DEFAULT 'anone' COMMENT "",
+            a3 bigint(20) DEFAULT '1' COMMENT "",
+            a4 bigint(20) DEFAULT '1' COMMENT "",
+            a5 bigint(11) DEFAULT '1' COMMENT "",
+            a6 bigint(11) DEFAULT '1' COMMENT "",
+            a7 bigint(11) DEFAULT '1' COMMENT "",
+            a8 bigint(20) DEFAULT '1' COMMENT "",
+            a9 bigint(20) DEFAULT '1' COMMENT "",
+            a10 bigint(20) DEFAULT '1' COMMENT "",
+            a11 bigint(20) DEFAULT '1' COMMENT "",
+            a12 bigint(20) DEFAULT '1' COMMENT "",
+            a13 bigint(20) DEFAULT '1' COMMENT "",
+            a14 bigint(20) DEFAULT '1' COMMENT "",
+            a15 varchar(100) DEFAULT 'anone' COMMENT "",
+            a16 varchar(100) DEFAULT 'anone' COMMENT "",
+            a17 varchar(100) DEFAULT 'anone' COMMENT "",
+            a18 bigint(20) DEFAULT '1' COMMENT "",
+            a19 varchar(100) DEFAULT 'anone' COMMENT "",
+            a20 varchar(100) DEFAULT 'anone' COMMENT "",
+            a21 bigint(20) DEFAULT '1' COMMENT "",
+            a22 bigint(20) DEFAULT '1' COMMENT "",
+            a23 bigint(20) DEFAULT '1' COMMENT "",
+            a24 bigint(20) DEFAULT '1' COMMENT "",
+            a25 varchar(100) DEFAULT 'anone' COMMENT "",
+            a26 varchar(100) DEFAULT 'anone' COMMENT "",
+            a27 varchar(100) DEFAULT 'anone' COMMENT "",
+            a28 bigint(20) DEFAULT '1' COMMENT "",
+            a29 bigint(20) DEFAULT '1' COMMENT "",
+            a30 bigint(20) DEFAULT '1' COMMENT "",
+            a31 varchar(100) DEFAULT 'anone' COMMENT "",
+            a32 varchar(100) DEFAULT 'anone' COMMENT "",
+            a33 bigint(20) DEFAULT '1' COMMENT "",
+            a34 bigint(11) DEFAULT '1' COMMENT "",
+            a35 bigint(11) DEFAULT '1' COMMENT "",
+            a36 bigint(20) DEFAULT '1' COMMENT "",
+            a37 bigint(20) DEFAULT '1' COMMENT "",
+            a38 bigint(20) DEFAULT '1' COMMENT "",
+            a39 varchar(100) DEFAULT 'anone' COMMENT "",
+            a40 varchar(100) DEFAULT 'anone' COMMENT "",
+            a41 bigint(11) DEFAULT '1' COMMENT "",
+            a42 bigint(20) DEFAULT '1' COMMENT "",
+            a43 varchar(100) DEFAULT 'anone' COMMENT "",
+            a44 varchar(100) DEFAULT 'anone' COMMENT "",
+            a45 varchar(100) DEFAULT 'anone' COMMENT "",
+            a46 bigint(20) DEFAULT '1' COMMENT "",
+            a47 bigint(11) DEFAULT '1' COMMENT "",
+            a48 bigint(20) DEFAULT '1' COMMENT "",
+            a49 bigint(20) DEFAULT '1' COMMENT "",
+            a50 bigint(20) DEFAULT '1' COMMENT "",
+            a51 bigint(11) DEFAULT '1' COMMENT "",
+            a52 bigint(20) DEFAULT '1' COMMENT "",
+            a53 varchar(100) DEFAULT 'anone' COMMENT "",
+            a54 varchar(100) DEFAULT 'anone' COMMENT "",
+            a55 varchar(100) DEFAULT 'anone' COMMENT "",
+            a56 varchar(100) DEFAULT 'anone' COMMENT "",
+            a57 bigint(20) DEFAULT '1' COMMENT "",
+            a58 bigint(11) DEFAULT '1' COMMENT "",
+            a59 varchar(100) DEFAULT 'anone' COMMENT "",
+            a60 bigint(11) DEFAULT '1' COMMENT "",
+            a61 varchar(100) DEFAULT 'anone' COMMENT "",
+            a62 bigint(11) DEFAULT '1' COMMENT "",
+            a63 varchar(100) DEFAULT 'anone' COMMENT "",
+            a64 varchar(100) DEFAULT 'anone' COMMENT "",
+            a65 bigint(20) DEFAULT '1' COMMENT "",
+            a66 bigint(20) DEFAULT '1' COMMENT "",
+            a67 varchar(100) DEFAULT 'anone' COMMENT "",
+            a68 varchar(100) DEFAULT 'anone' COMMENT "",
+            a69 varchar(100) DEFAULT 'anone' COMMENT "",
+            a70 varchar(100) DEFAULT 'anone' COMMENT "",
+            a71 bigint(20) DEFAULT '1' COMMENT "",
+            a72 bigint(20) DEFAULT '1' COMMENT "",
+            a73 bigint(20) DEFAULT '1' COMMENT "",
+            a74 bigint(20) DEFAULT '1' COMMENT "",
+            a75 bigint(20) DEFAULT '1' COMMENT "",
+            a76 bigint(20) DEFAULT '1' COMMENT "",
+            a77 bigint(20) DEFAULT '1' COMMENT "",
+            a78 bigint(20) DEFAULT '1' COMMENT "",
+            a79 bigint(20) DEFAULT '1' COMMENT "",
+            a80 bigint(20) DEFAULT '1' COMMENT "",
+            a81 varchar(100) DEFAULT 'anone' COMMENT "",
+            a82 varchar(100) DEFAULT 'anone' COMMENT "",
+            a83 varchar(100) DEFAULT 'anone' COMMENT "",
+            a84 bigint(11) DEFAULT '1' COMMENT "",
+            a85 bigint(11) DEFAULT '1' COMMENT "",
+            a86 varchar(100) DEFAULT 'anone' COMMENT "",
+            a87 varchar(100) DEFAULT 'anone' COMMENT "",
+            a88 bigint(20) DEFAULT '1' COMMENT "",
+            a89 bigint(20) DEFAULT '1' COMMENT "",
+            a90 varchar(100) DEFAULT 'anone' COMMENT "",
+            a91 varchar(100) DEFAULT 'anone' COMMENT "",
+            a92 varchar(100) DEFAULT 'anone' COMMENT "",
+            a93 varchar(100) DEFAULT 'anone' COMMENT "",
+            a94 bigint(20) DEFAULT '1' COMMENT "",
+            a95 bigint(20) DEFAULT '1' COMMENT "",
+            a96 varchar(100) DEFAULT 'anone' COMMENT "",
+            a97 varchar(100) DEFAULT 'anone' COMMENT "",
+            a98 bigint(20) DEFAULT '1' COMMENT "",
+            a99 bigint(11) DEFAULT '1' COMMENT "",
+            a100 varchar(100) DEFAULT 'anone' COMMENT "",
+            a101 varchar(100) DEFAULT 'anone' COMMENT "",
+            a102 varchar(100) DEFAULT 'anone' COMMENT "",
+            a103 varchar(100) DEFAULT 'anone' COMMENT "",
+            a104 varchar(500) DEFAULT 'anone' COMMENT "",
+            a105 bigint(20) DEFAULT '1' COMMENT "",
+            a106 varchar(100) DEFAULT 'anone' COMMENT "",
+            a107 varchar(100) DEFAULT 'anone' COMMENT "",
+            a108 varchar(100) DEFAULT 'anone' COMMENT "",
+            a109 bigint(20) DEFAULT '1' COMMENT "",
+            a110 varchar(100) DEFAULT 'anone' COMMENT "",
+            a111 varchar(100) DEFAULT 'anone' COMMENT "",
+            a112 bigint(11) DEFAULT '1' COMMENT "",
+            a113 varchar(100) DEFAULT 'anone' COMMENT "",
+            a114 varchar(100) DEFAULT 'anone' COMMENT "",
+            a115 bigint(11) DEFAULT '1' COMMENT "",
+            a116 varchar(100) DEFAULT 'anone' COMMENT "",
+            a117 bigint(11) DEFAULT '1' COMMENT "",
+            a118 varchar(100) DEFAULT 'anone' COMMENT "",
+            a119 varchar(100) DEFAULT 'anone' COMMENT "",
+            a120 bigint(20) DEFAULT '1' COMMENT "",
+            a121 bigint(20) DEFAULT '1' COMMENT "",
+            a122 bigint(20) DEFAULT '1' COMMENT "",
+            a123 bigint(20) DEFAULT '1' COMMENT "",
+            a124 bigint(20) DEFAULT '1' COMMENT "",
+            a125 bigint(20) DEFAULT '1' COMMENT "",
+            a126 bigint(20) DEFAULT '1' COMMENT "",
+            a127 bigint(20) DEFAULT '1' COMMENT "",
+            a128 bigint(20) DEFAULT '1' COMMENT "",
+            a129 bigint(20) DEFAULT '1' COMMENT "",
+            a130 bigint(20) DEFAULT '1' COMMENT "",
+            a131 varchar(100) DEFAULT 'anone' COMMENT "",
+            a132 varchar(100) DEFAULT 'anone' COMMENT "",
+            a133 varchar(100) DEFAULT 'anone' COMMENT "",
+            a134 varchar(100) DEFAULT 'anone' COMMENT "",
+            a135 varchar(100) DEFAULT 'anone' COMMENT "",
+            a136 bigint(20) DEFAULT '1' COMMENT "",
+            a137 varchar(100) DEFAULT 'anone' COMMENT "",
+            a138 varchar(100) DEFAULT 'anone' COMMENT "",
+            a139 varchar(100) DEFAULT 'anone' COMMENT "",
+            a140 varchar(100) DEFAULT 'anone' COMMENT "",
+            a141 varchar(100) DEFAULT 'anone' COMMENT "",
+            a142 bigint(20) DEFAULT '1' COMMENT "",
+            a143 varchar(100) DEFAULT 'anone' COMMENT "",
+            a144 varchar(100) DEFAULT 'anone' COMMENT "",
+            a145 bigint(20) DEFAULT '1' COMMENT "",
+            a146 varchar(100) DEFAULT 'anone' COMMENT "",
+            a147 varchar(100) DEFAULT 'anone' COMMENT "",
+            a148 bigint(20) DEFAULT '1' COMMENT "",
+            a149 varchar(100) DEFAULT 'anone' COMMENT "",
+            a150 bigint(20) DEFAULT '1' COMMENT "",
+            a151 varchar(100) DEFAULT 'anone' COMMENT "",
+            a152 bigint(11) DEFAULT '1' COMMENT "",
+            a153 varchar(100) DEFAULT 'anone' COMMENT "",
+            a154 bigint(20) DEFAULT '1' COMMENT "",
+            a155 bigint(20) DEFAULT '1' COMMENT "",
+            a156 varchar(100) DEFAULT 'anone' COMMENT "",
+            a157 varchar(100) DEFAULT 'anone' COMMENT "",
+            a158 varchar(100) DEFAULT 'anone' COMMENT "",
+            a159 varchar(100) DEFAULT 'anone' COMMENT "",
+            a160 bigint(20) DEFAULT '1' COMMENT "",
+            a161 bigint(20) DEFAULT '1' COMMENT "",
+            a162 bigint(11) DEFAULT '1' COMMENT "",
+            a163 bigint(20) DEFAULT '1' COMMENT "",
+            a164 bigint(20) DEFAULT '1' COMMENT "",
+            a165 bigint(20) DEFAULT '1' COMMENT "",
+            a166 bigint(20) DEFAULT '1' COMMENT "",
+            a167 varchar(100) DEFAULT 'anone' COMMENT "",
+            a168 varchar(100) DEFAULT 'anone' COMMENT "",
+            a169 varchar(100) DEFAULT 'anone' COMMENT "",
+            a170 varchar(100) DEFAULT 'anone' COMMENT "",
+            a171 varchar(150) DEFAULT 'anone' COMMENT "",
+            a172 bigint(20) DEFAULT '1' COMMENT "",
+            a173 varchar(100) DEFAULT 'anone' COMMENT "",
+            a174 bigint(20) DEFAULT '1' COMMENT "",
+            a175 varchar(100) DEFAULT 'anone' COMMENT "",
+            a176 bigint(11) DEFAULT '1' COMMENT "",
+            a177 bigint(20) DEFAULT '1' COMMENT "",
+            a178 varchar(100) DEFAULT 'anone' COMMENT "",
+            a179 bigint(11) DEFAULT '1' COMMENT "",
+            a180 varchar(100) DEFAULT 'anone' COMMENT "",
+            a181 bigint(20) DEFAULT '1' COMMENT "",
+            a182 varchar(100) DEFAULT 'anone' COMMENT "",
+            a183 varchar(100) DEFAULT 'anone' COMMENT "",
+            a184 varchar(100) DEFAULT 'anone' COMMENT "",
+            a185 bigint(20) DEFAULT '1' COMMENT "",
+            a186 varchar(100) DEFAULT 'anone' COMMENT "",
+            a187 bigint(20) DEFAULT '1' COMMENT "",
+            a188 varchar(100) DEFAULT 'anone' COMMENT "",
+            a189 varchar(100) DEFAULT 'anone' COMMENT "",
+            a190 varchar(100) DEFAULT 'anone' COMMENT "",
+            a191 varchar(100) DEFAULT 'anone' COMMENT "",
+            a192 bigint(20) DEFAULT '1' COMMENT "",
+            a193 bigint(11) DEFAULT '1' COMMENT "",
+            a194 varchar(100) DEFAULT 'anone' COMMENT "",
+            a195 bigint(20) DEFAULT '1' COMMENT "",
+            a196 varchar(100) DEFAULT 'anone' COMMENT "",
+            a197 varchar(100) DEFAULT 'anone' COMMENT "",
+            a198 varchar(100) DEFAULT 'anone' COMMENT "",
+            a199 bigint(20) DEFAULT '1' COMMENT "",
+            a200 varchar(100) DEFAULT 'anone' COMMENT ""
+        ) ENGINE=OLAP
+        UNIQUE KEY(`id`)
+        COMMENT "OLAP"
+        DISTRIBUTED BY HASH(`id`) BUCKETS 32
+        PROPERTIES (
+            "store_row_column" = "true"
+        );
+        """
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes) {
+            // stream load the same data i times and record each size
+            repeate_stream_load_same_data(tableName, i, "regression/show_data/fullData.1.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so redundant rowsets are merged before measuring
+            trigger_compaction(tablets)
+
+            stream_load_partial_update_data(tableName)
+            rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            trigger_compaction(tablets)
+
+            // then sleep 1 min and wait for the FE to finish reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} rounds of stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+    }
+
+    main()
+}
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy
new file mode 100644
index 00000000000000..29ce5af49091ea
--- /dev/null
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// The cases are copied from https://github.com/trinodb/trino/tree/master
+// /testing/trino-product-tests/src/main/resources/sql-tests/testcases/tpcds
+// and modified by Doris.
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+// load the same data 10 times and expect the data size not to rise
+suite("test_cloud_mow_show_data","p2") {
+    // run in cloud mode only
+    if (!isCloudMode()) {
+        logger.info("not cloud mode, not run")
+        return
+    }
+
+    def main = {
+        def tableName = "test_cloud_mow_show_data"
+        sql "DROP TABLE IF EXISTS ${tableName};"
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName}(
+                L_ORDERKEY    INTEGER NOT NULL,
+                L_PARTKEY     INTEGER NOT NULL,
+                L_SUPPKEY     INTEGER NOT NULL,
+                L_LINENUMBER  INTEGER NOT NULL,
+                L_QUANTITY    DECIMAL(15,2) NOT NULL,
+                L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL,
+                L_DISCOUNT    DECIMAL(15,2) NOT NULL,
+                L_TAX         DECIMAL(15,2) NOT NULL,
+                L_RETURNFLAG  CHAR(1) NOT NULL,
+                L_LINESTATUS  CHAR(1) NOT NULL,
+                L_SHIPDATE    DATE NOT NULL,
+                L_COMMITDATE  DATE NOT NULL,
+                L_RECEIPTDATE DATE NOT NULL,
+                L_SHIPINSTRUCT CHAR(25) NOT NULL,
+                L_SHIPMODE     CHAR(10) NOT NULL,
+                L_COMMENT      VARCHAR(44) NOT NULL,
+                L_NULL         VARCHAR
+            )
+            UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER)
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            PROPERTIES (
+                "replication_num" = "1"
+            )
+        """
+        List tablets = get_tablets_from_table(tableName)
+        def loadTimes = [1, 10]
+        Map sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
+        for (int i in loadTimes) {
+            // stream load the same data i times and record each size
+            repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
+            def rows = sql_return_maparray "select count(*) as count from ${tableName};"
+            logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            // trigger compaction so redundant rowsets are merged before measuring
+            trigger_compaction(tablets)
+
+            // then sleep 1 min and wait for the FE to finish reporting tablet stats
+            sleep(60 * 1000)
+            sql "select count(*) from ${tableName}"
+
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            sleep(60 * 1000)
+            logger.info("after ${i} rounds of stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
+        }
+
+        // expect mysqlSize == apiSize == storageSize
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
+        // expect the size after loading once to equal the size after loading 10 times
+        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
+        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
+        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+    }
+
+    main()
+}
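
All of these suites follow the same measure-and-compare pattern: load identical data, trigger compaction, sleep a fixed 60 seconds for the FE tablet-stat report, then assert that the size reported by the HTTP API, the backend object storage, and SHOW DATA over MySQL all agree. A possible alternative to the fixed sleeps is to poll until the three measurements converge. The sketch below is hypothetical and not part of this patch: `waitForSizeConvergence` is an illustrative name, and it assumes the plugin helpers introduced by this patch (`caculate_table_data_size_through_api`, `caculate_table_data_size_in_backend_storage`, `show_table_data_size_through_mysql`) keep the signatures used in the suites above.

// Hypothetical sketch (not part of this patch): poll until the three size sources
// agree instead of sleeping a fixed 60s per measurement.
def waitForSizeConvergence = { List tablets, String tableName, long timeoutMs ->
    long deadline = System.currentTimeMillis() + timeoutMs
    while (System.currentTimeMillis() < deadline) {
        def apiSize   = caculate_table_data_size_through_api(tablets)
        def cbsSize   = caculate_table_data_size_in_backend_storage(tablets)
        def mysqlSize = show_table_data_size_through_mysql(tableName)
        if (apiSize == mysqlSize && mysqlSize == cbsSize) {
            return true          // all three views of the table size agree
        }
        sleep(5 * 1000)          // FE reports tablet stats periodically; retry
    }
    return false                 // sizes never converged within the timeout
}

A caller would replace each `sleep(60 * 1000)` pair with something like `assertTrue(waitForSizeConvergence(tablets, tableName, 300 * 1000))`, trading the fixed wait for a bounded retry loop that fails fast when the sizes already agree.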