Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[test](regression-test) use unified trigger_and_wait_compaction method #45908

Merged
merged 1 commit into from
Dec 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
156 changes: 156 additions & 0 deletions regression-test/plugins/plugin_compaction.groovy
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

import org.apache.doris.regression.suite.Suite
import java.util.concurrent.TimeUnit
import org.awaitility.Awaitility;

// Query the compaction run status of a single tablet on the given BE node.
Suite.metaClass.be_get_compaction_status{ String ip, String port, String tablet_id /* param */->
    curl("GET", "http://${ip}:${port}/api/compaction/run_status?tablet_id=${tablet_id}")
}

// Query the overall compaction run status of a BE node (all tablets).
Suite.metaClass.be_get_overall_compaction_status{ String ip, String port /* param */->
    curl("GET", "http://${ip}:${port}/api/compaction/run_status")
}

// Fetch the detailed compaction/tablet status (rowsets, last success times, ...) of one tablet.
Suite.metaClass.be_show_tablet_status{ String ip, String port, String tablet_id /* param */->
    curl("GET", "http://${ip}:${port}/api/compaction/show?tablet_id=${tablet_id}")
}

// Internal helper: POST a compaction request of the given type for one tablet on one BE.
Suite.metaClass._be_run_compaction = { String ip, String port, String tablet_id, String compact_type ->
    curl("POST", "http://${ip}:${port}/api/compaction/run?tablet_id=${tablet_id}&compact_type=${compact_type}")
}

// Trigger a base compaction on the given tablet (thin wrapper over _be_run_compaction).
Suite.metaClass.be_run_base_compaction = { String ip, String port, String tablet_id /* param */->
    _be_run_compaction(ip, port, tablet_id, "base")
}

logger.info("Added 'be_run_base_compaction' function to Suite")

// Trigger a cumulative compaction on the given tablet (thin wrapper over _be_run_compaction).
Suite.metaClass.be_run_cumulative_compaction = { String ip, String port, String tablet_id /* param */->
    _be_run_compaction(ip, port, tablet_id, "cumulative")
}

logger.info("Added 'be_run_cumulative_compaction' function to Suite")

// Trigger a full compaction on the given tablet (thin wrapper over _be_run_compaction).
Suite.metaClass.be_run_full_compaction = { String ip, String port, String tablet_id /* param */->
    _be_run_compaction(ip, port, tablet_id, "full")
}

// Trigger a full compaction for every tablet that belongs to the given table id.
Suite.metaClass.be_run_full_compaction_by_table_id = { String ip, String port, String table_id /* param */->
    curl("POST", "http://${ip}:${port}/api/compaction/run?table_id=${table_id}&compact_type=full")
}

logger.info("Added 'be_run_full_compaction' function to Suite")

// Trigger the given compaction type on every tablet of `table_name` and wait
// until all successfully-triggered compactions have finished.
//
// Parameters:
//   table_name      - table whose tablets will be compacted
//   compaction_type - one of "cumulative", "base" or "full"
//   timeout_seconds - max time to wait for completion (default 300s); on
//                     timeout Awaitility throws ConditionTimeoutException
//
// Throws IllegalArgumentException for an unsupported compaction type, and
// Exception if triggering fails while auto compaction is disabled (with auto
// compaction enabled a trigger failure is tolerated, because a background
// compaction may already be occupying the tablet).
Suite.metaClass.trigger_and_wait_compaction = { String table_name, String compaction_type, int timeout_seconds=300 ->
    if (!(compaction_type in ["cumulative", "base", "full"])) {
        throw new IllegalArgumentException("invalid compaction type: ${compaction_type}, supported types: cumulative, base, full")
    }

    def backendId_to_backendIP = [:]
    def backendId_to_backendHttpPort = [:]
    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
    def tablets = sql_return_maparray """show tablets from ${table_name}"""
    def exit_code, stdout, stderr

    // Fetch the create-table statement once and derive both flags from it
    // (previously the same "show create table" query was issued twice).
    def create_table_stmt = sql("show create table ${table_name}")[0][1]
    def auto_compaction_disabled = create_table_stmt.contains('"disable_auto_compaction" = "true"')
    def is_time_series_compaction = create_table_stmt.contains('"compaction_policy" = "time_series"')

    // 1. cache compaction status per (BE host, tablet) so we can later detect
    //    whether the "last <type> success time" actually advanced
    def be_tablet_compaction_status = [:]
    for (tablet in tablets) {
        def be_host = backendId_to_backendIP["${tablet.BackendId}"]
        def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"]
        (exit_code, stdout, stderr) = be_show_tablet_status(be_host, be_port, tablet.TabletId)
        assert exit_code == 0: "get tablet status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}"

        def tabletStatus = parseJson(stdout.trim())
        be_tablet_compaction_status.put("${be_host}-${tablet.TabletId}", tabletStatus)
    }

    // 2. trigger compaction on every tablet; remember which triggers succeeded
    def triggered_tablets = []
    for (tablet in tablets) {
        def be_host = backendId_to_backendIP["${tablet.BackendId}"]
        def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"]
        switch (compaction_type) {
            case "cumulative":
                (exit_code, stdout, stderr) = be_run_cumulative_compaction(be_host, be_port, tablet.TabletId)
                break
            case "base":
                (exit_code, stdout, stderr) = be_run_base_compaction(be_host, be_port, tablet.TabletId)
                break
            case "full":
                (exit_code, stdout, stderr) = be_run_full_compaction(be_host, be_port, tablet.TabletId)
                break
        }
        assert exit_code == 0: "trigger compaction failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}"
        def trigger_status = parseJson(stdout.trim())
        if (trigger_status.status.toLowerCase() != "success") {
            if (trigger_status.status.toLowerCase() == "already_exist") {
                triggered_tablets.add(tablet) // compaction already in queue, treat it as successfully triggered
            } else if (!auto_compaction_disabled) {
                // ignore the error if auto compaction enabled
            } else {
                throw new Exception("trigger compaction failed, be host: ${be_host}, tablet id: ${tablet.TabletId}, status: ${trigger_status.status}")
            }
        } else {
            triggered_tablets.add(tablet)
        }
    }

    // 3. wait until every triggered compaction has finished
    def running = triggered_tablets.size() > 0
    Awaitility.await().atMost(timeout_seconds, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until(() -> {
        for (tablet in triggered_tablets) {
            def be_host = backendId_to_backendIP["${tablet.BackendId}"]
            def be_port = backendId_to_backendHttpPort["${tablet.BackendId}"]

            (exit_code, stdout, stderr) = be_get_compaction_status(be_host, be_port, tablet.TabletId)
            assert exit_code == 0: "get compaction status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}"
            def compactionStatus = parseJson(stdout.trim())
            assert compactionStatus.status.toLowerCase() == "success": "compaction failed, be host: ${be_host}, tablet id: ${tablet.TabletId}, status: ${compactionStatus.status}"
            // running is true means compaction is still running
            running = compactionStatus.run_status

            if (!isCloudMode() && !is_time_series_compaction) {
                (exit_code, stdout, stderr) = be_show_tablet_status(be_host, be_port, tablet.TabletId)
                assert exit_code == 0: "get tablet status failed, exit code: ${exit_code}, stdout: ${stdout}, stderr: ${stderr}"
                def tabletStatus = parseJson(stdout.trim())
                def oldStatus = be_tablet_compaction_status.get("${be_host}-${tablet.TabletId}")
                // last compaction success time isn't updated, indicates compaction is not started(so we treat it as running and wait)
                running = running || (oldStatus["last ${compaction_type} success time"] == tabletStatus["last ${compaction_type} success time"])
                if (running) {
                    logger.info("compaction is still running, be host: ${be_host}, tablet id: ${tablet.TabletId}, run status: ${compactionStatus.run_status}, old status: ${oldStatus}, new status: ${tabletStatus}")
                    return false
                }
            } else {
                // 1. cloud mode doesn't show compaction success time in tablet status for the time being,
                // 2. time series compaction sometimes doesn't update compaction success time
                // so we solely check run_status for these two cases
                if (running) {
                    logger.info("compaction is still running, be host: ${be_host}, tablet id: ${tablet.TabletId}")
                    return false
                }
            }
        }
        return true
    })

    // On timeout Awaitility already throws ConditionTimeoutException, so this is
    // only a final sanity check. NOTE: the previous message interpolated
    // `be_host`, which is scoped to the closure above — a failing assert would
    // have raised MissingPropertyException instead of the intended message.
    assert !running: "wait compaction timeout, table: ${table_name}"
}
8 changes: 3 additions & 5 deletions regression-test/plugins/plugin_curl_requester.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import org.apache.http.conn.ConnectTimeoutException
import org.apache.http.conn.HttpHostConnectException
import org.codehaus.groovy.runtime.IOGroovyMethods


Suite.metaClass.http_client = { String method, String url /* param */ ->
Suite suite = delegate as Suite
if (method != "GET" && method != "POST") {
Expand All @@ -35,7 +36,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ ->
if (!url || !(url =~ /^https?:\/\/.+/)) {
throw new Exception("Invalid url: ${url}")
}

Integer timeout = 300 // seconds
Integer maxRetries = 10
Integer retryCount = 0
Expand Down Expand Up @@ -119,7 +120,7 @@ Suite.metaClass.curl = { String method, String url, String body = null /* param
if (url.isBlank()) {
throw new Exception("invalid curl url, blank")
}

Integer timeout = 10; // 10 seconds;
Integer maxRetries = 10; // Maximum number of retries
Integer retryCount = 0; // Current retry count
Expand Down Expand Up @@ -161,10 +162,8 @@ Suite.metaClass.curl = { String method, String url, String body = null /* param

return [code, out, err]
}

logger.info("Added 'curl' function to Suite")


Suite.metaClass.show_be_config = { String ip, String port /*param */ ->
return curl("GET", String.format("http://%s:%s/api/show_config", ip, port))
}
Expand Down Expand Up @@ -231,7 +230,6 @@ Suite.metaClass.update_all_be_config = { String key, Object value ->

logger.info("Added 'update_all_be_config' function to Suite")


Suite.metaClass._be_report = { String ip, int port, String reportName ->
def url = "http://${ip}:${port}/api/report/${reportName}"
def result = Http.GET(url, true)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,32 +88,7 @@ suite('compaction_width_array_column', "p2") {
while (isOverLap && tryCnt < 3) {
isOverLap = false

for (def tablet in tablets) {
String tablet_id = tablet.TabletId
backend_id = tablet.BackendId
(code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
assertEquals(code, 0)
def compactJson = parseJson(out.trim())
assertEquals("success", compactJson.status.toLowerCase())
}

// wait for all compactions done
for (def tablet in tablets) {
boolean running = true
do {
Thread.sleep(1000)
String tablet_id = tablet.TabletId
backend_id = tablet.BackendId
(code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
assertEquals(code, 0)
def compactionStatus = parseJson(out.trim())
assertEquals("success", compactionStatus.status.toLowerCase())
running = compactionStatus.run_status
} while (running)
}

trigger_and_wait_compaction(tableName, "cumulative")
for (def tablet in tablets) {
String tablet_id = tablet.TabletId
(code, out, err) = curl("GET", tablet.CompactionStatus)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ suite("test_base_compaction") {

backend_id = backendId_to_backendIP.keySet()[0]
def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))

logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
assertEquals(code, 0)
def configList = parseJson(out.trim())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@ import org.codehaus.groovy.runtime.IOGroovyMethods

suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
def tableName = "test_base_compaction_with_dup_key_max_file_size_limit"

// use customer table of tpch_sf100
def rows = 15000000
def load_tpch_sf100_customer = {
def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
def load_tpch_sf100_customer = {
def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString()
def rowCount = sql "select count(*) from ${tableName}"
def s3BucketName = getS3BucketName()
def s3WithProperties = """WITH S3 (
Expand Down Expand Up @@ -62,7 +62,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
}
sleep(5000)
}
}
}
}
try {
String backend_id;
Expand All @@ -72,7 +72,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {

backend_id = backendId_to_backendIP.keySet()[0]
def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))

logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
assertEquals(code, 0)
def configList = parseJson(out.trim())
Expand Down Expand Up @@ -106,29 +106,6 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
}
assertEquals(code, 0)
return out
}

def waitForCompaction = { be_host, be_http_port, tablet_id ->
// wait for all compactions done
boolean running = true
do {
Thread.sleep(1000)
StringBuilder sb = new StringBuilder();
sb.append("curl -X GET http://${be_host}:${be_http_port}")
sb.append("/api/compaction/run_status?tablet_id=")
sb.append(tablet_id)

String command = sb.toString()
logger.info(command)
process = command.execute()
code = process.waitFor()
out = process.getText()
logger.info("Get compaction status: code=" + code + ", out=" + out)
assertEquals(code, 0)
def compactionStatus = parseJson(out.trim())
assertEquals("success", compactionStatus.status.toLowerCase())
running = compactionStatus.run_status
} while (running)
}

sql """ DROP TABLE IF EXISTS ${tableName}; """
Expand All @@ -148,7 +125,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
PROPERTIES (
"replication_num" = "1", "disable_auto_compaction" = "true"
)
"""
"""

def tablet = (sql_return_maparray """ show tablets from ${tableName}; """)[0]
String tablet_id = tablet.TabletId
Expand All @@ -164,10 +141,7 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
// [0-1] 0
// [2-2] 1G nooverlapping
// cp: 3
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id],
"cumulative", tablet_id).contains("Success"));
waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)

trigger_and_wait_compaction(tableName, "cumulative")

// rowsets:
// [0-1] 0
Expand All @@ -180,21 +154,15 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
// [0-1] 0
// [2-2] 1G nooverlapping
// [3-3] 1G nooverlapping
// cp: 4
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id],
"cumulative", tablet_id).contains("Success"));
waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)

// cp: 4
trigger_and_wait_compaction(tableName, "cumulative")

// The conditions for base compaction have been satisfied.
// Since the size of first input rowset is 0, there is no file size limitation. (maybe fix it?)
// rowsets:
// [0-3] 2G nooverlapping
// cp: 4
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id],
"base", tablet_id).contains("Success"));
waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)

// cp: 4
trigger_and_wait_compaction(tableName, "base")

// rowsets:
// [0-3] 2G nooverlapping
Expand All @@ -206,17 +174,15 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") {
// [0-3] 2G nooverlapping
// [4-4] 1G nooverlapping
// cp: 5
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id],
"cumulative", tablet_id).contains("Success"));
waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)

trigger_and_wait_compaction(tableName, "cumulative")

// Due to the limit of config::base_compaction_dup_key_max_file_size_mbytes(1G),
// can not do base compaction, return E-808
// rowsets:
// [0-3] 2G nooverlapping
// [4-4] 1G nooverlapping
// cp: 5
// WHAT: replace with plugin and handle fail?
assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id],
"base", tablet_id).contains("E-808"));

Expand Down
Loading
Loading