Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Branch 2.0 #107

Closed
wants to merge 52 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
52 commits
Select commit Hold shift + click to select a range
72d5372
Revert test_db_sync.groovy
JackDrogon Feb 26, 2024
02bd5b2
Add verbose log in test_bloomfilter_index.groovy checkShowTimesOf
JackDrogon Feb 26, 2024
64827a6
Add verbose log in test_bitmap_index checkShowTimesOf
JackDrogon Feb 27, 2024
1b723ba
Fix case db_sync by send TEST_* query to target (#52)
w41ter Mar 22, 2024
a626a04
Fix bloomfilter index case by wait add index finish (#56)
w41ter Apr 10, 2024
0b7373c
Fix materialized view
w41ter Apr 10, 2024
61513ba
fix sync db error when table name is keyword (#53)
lsy3993 Apr 7, 2024
85e395c
add option to start pprof server (#51)
lsy3993 Apr 8, 2024
9b3e86d
skip view when set binlog.enable (#50)
lsy3993 Apr 8, 2024
eb5770a
Make the constants configurable (#49)
ButterBright Apr 8, 2024
1aefa7b
Add golang workflows
w41ter Apr 16, 2024
adfed4f
Drop the table if restore is cancelled by signature not matched (#58)
w41ter Apr 16, 2024
d312265
Update changelog after 2.0.3.8
w41ter Apr 16, 2024
fc86dd1
Fix db sync with keywords table name
w41ter Apr 16, 2024
b421cd5
remove date_num in db_sync (#59)
lsy3993 Apr 17, 2024
32ff565
Release 2.0.3.9
w41ter Apr 23, 2024
74abfc6
Add tarball target
w41ter Apr 23, 2024
4fa87ea
Add LICENSE (#63)
w41ter Apr 24, 2024
78ce9f3
Save key time point for ccr job progress (#64)
w41ter Apr 26, 2024
ac617ba
Fix keyword name in ADD PARTITION (#65)
w41ter Apr 26, 2024
e81b0b7
Fix drop table with keyword (#67)
w41ter Apr 28, 2024
2527bc1
Fix drop table keyword name if table schema conflicts (#68)
w41ter Apr 28, 2024
444beff
Fix test keyword name test (#69)
w41ter Apr 28, 2024
15ddc34
create a handler to view job details (#66)
lsy3993 Apr 28, 2024
f58dfb8
create a handler can get job progress (#72)
lsy3993 Apr 29, 2024
04fe71e
Fix keyword name in job.go (#71)
w41ter Apr 28, 2024
527a47a
Add platform as tarball suffix
w41ter May 9, 2024
baf3cf7
Update CHANGELOG for 2.0.10-rc01
w41ter May 10, 2024
0a31ab0
add more desc about operations (#78)
lsy3993 May 14, 2024
efeed94
Add postgresql as meta db (#77)
lsy3993 May 15, 2024
51f13a8
Fix job progress key time point (#80)
w41ter May 22, 2024
46fee2e
Fix request redirection (#81)
lsy3993 May 22, 2024
51fb8b9
Fix binlog lost (#86)
w41ter May 24, 2024
13fa4f0
Save prev commit seq if progress is done (#91)
w41ter May 28, 2024
3ad8d48
Change default connect & rpc timeout to 10s,30s (#94)
w41ter May 29, 2024
05670cc
add connect and rpc timeout in start (#95)
lsy3993 May 29, 2024
a51bdc5
Log snapshot meta size (#103)
w41ter Jun 4, 2024
98998a5
Add drop partition usercase
w41ter May 11, 2024
4ea94dd
Fix could not found partition id after drop partition (#82)
w41ter May 22, 2024
89aa44f
Move sql related operations into Specer (#85)
w41ter May 24, 2024
1b36ac0
Fix add partition sql and add adding partition tests (#88)
w41ter May 27, 2024
5f2c0d5
Add adding partition test (#89)
w41ter May 27, 2024
b54eb03
remove mtmv case (#90)
lsy3993 May 28, 2024
bbd2c50
Correct add list partition sql (#92)
w41ter May 28, 2024
4afc5c2
Filter temp partition upserts and add insert overwrite case (#97)
w41ter May 30, 2024
4ece35a
Fix add partition case (#98)
w41ter May 30, 2024
d0383b4
Fix add partition sql (#99)
w41ter May 30, 2024
e7e8eb1
Fix insert overwrite cases (#101)
w41ter Jun 3, 2024
a4c5bbd
fix create/drop view bug (#100)
lsy3993 Jun 3, 2024
9c6ce44
Remove useless CreateTable (#102)
w41ter Jun 3, 2024
39af0da
Check backup/restore state for test insert overwrite case (#105)
w41ter Jun 6, 2024
61c6b22
Merge branch 'dev' into branch-2.0
lsy3993 Jun 7, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 5 additions & 7 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,12 +1,9 @@
# 更新日志

## dev

### Fix

- 修复因与上下游 FE 网络中断而触发 full sync 的问题
## v 2.0.10.0

## v 2.1.3/2.0.3.10
对应 doris 2.0.10.

### Feature

Expand All @@ -20,12 +17,13 @@

## v 2.0.3.9

配合 doris 2.0.9 版本
配合 doris 2.0.9 版本.

### Feature

- 添加选项以启动 pprof server
- 允许配置 rpc 合 connection 超时
- 允许配置 rpc 和 connection 超时


### Fix

Expand Down
2 changes: 1 addition & 1 deletion pkg/ccr/base/spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -677,7 +677,7 @@ func (s *Spec) CheckRestoreFinished(snapshotName string) (bool, error) {
}

func (s *Spec) GetRestoreSignatureNotMatchedTable(snapshotName string) (string, error) {
log.Debugf("get restore signature not matched table, spec: %s, snapshot: %s", s.String(), snapshotName)
log.Debugf("get restore signature not matched table, spec: %s, database: %s, snapshot: %s", s.String(), s.Database, snapshotName)

for i := 0; i < MAX_CHECK_RETRY_TIMES; i++ {
if restoreState, status, err := s.checkRestoreFinished(snapshotName); err != nil {
Expand Down
69 changes: 33 additions & 36 deletions regression-test/suites/db-sync/test_db_sync.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -20,26 +20,20 @@ suite("test_db_sync") {
def syncerAddress = "127.0.0.1:9190"
def test_num = 0
def insert_num = 5
def date_num = "2021-01-02"
def sync_gap_time = 5000

def createUniqueTable = { tableName ->
sql """
CREATE TABLE if NOT EXISTS ${tableName}
(
`test` INT,
`id` INT,
`date_time` date NOT NULL
`id` INT
)
ENGINE=OLAP
UNIQUE KEY(`test`, `id`, `date_time`)
AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day'))
(
)
DISTRIBUTED BY HASH(id) BUCKETS AUTO
UNIQUE KEY(`test`, `id`)
DISTRIBUTED BY HASH(id) BUCKETS 1
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"estimate_partition_size" = "10G",
"binlog.enable" = "true"
)
"""
Expand All @@ -49,43 +43,33 @@ suite("test_db_sync") {
CREATE TABLE if NOT EXISTS ${tableName}
(
`test` INT,
`date_time` date NOT NULL,
`last` INT REPLACE DEFAULT "0",
`cost` INT SUM DEFAULT "0",
`max` INT MAX DEFAULT "0",
`min` INT MIN DEFAULT "0"
)
ENGINE=OLAP
AGGREGATE KEY(`test`, `date_time`)
AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day'))
(
)
DISTRIBUTED BY HASH(`test`) BUCKETS AUTO
AGGREGATE KEY(`test`)
DISTRIBUTED BY HASH(`test`) BUCKETS 1
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"estimate_partition_size" = "10G",
"binlog.enable" = "true"
)
"""
}

def createDuplicateTable = { tableName ->
sql """
sql """
CREATE TABLE if NOT EXISTS ${tableName}
(
`test` INT,
`id` INT,
`date_time` date NOT NULL
`id` INT
)
ENGINE=OLAP
DUPLICATE KEY(`test`, `id`, `date_time`)
AUTO PARTITION BY RANGE (date_trunc(`date_time`, 'day'))
(
)
DISTRIBUTED BY HASH(id) BUCKETS AUTO
DUPLICATE KEY(`test`, `id`)
DISTRIBUTED BY HASH(id) BUCKETS 1
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"estimate_partition_size" = "10G",
"binlog.enable" = "true"
)
"""
Expand Down Expand Up @@ -163,25 +147,26 @@ suite("test_db_sync") {
createUniqueTable(tableUnique0)
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}')
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index})
"""
}

createAggergateTable(tableAggregate0)
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableAggregate0} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index})
INSERT INTO ${tableAggregate0} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index})
"""
}

createDuplicateTable(tableDuplicate0)
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableDuplicate0} VALUES (0, 99, '${date_num}')
INSERT INTO ${tableDuplicate0} VALUES (0, 99)
"""
}

sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")"
sql "sync"

String response
httpTest {
Expand Down Expand Up @@ -209,20 +194,21 @@ suite("test_db_sync") {
test_num = 1
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}')
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index})
"""
}
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableAggregate0} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index})
INSERT INTO ${tableAggregate0} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index})
"""
}
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableDuplicate0} VALUES (0, 99, '${date_num}')
INSERT INTO ${tableDuplicate0} VALUES (0, 99)
"""
}

sql "sync"
assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}",
insert_num, 30))
assertTrue(checkSelectTimesOf("SELECT * FROM ${tableAggregate0} WHERE test=${test_num}",
Expand All @@ -246,17 +232,22 @@ suite("test_db_sync") {

for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableUnique1} VALUES (${test_num}, ${index}, '${date_num}')
INSERT INTO ${tableUnique1} VALUES (${test_num}, ${index})
"""
}
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableAggregate1} VALUES (${test_num}, ${index}, ${index}, ${index}, ${index})
"""
}
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableAggregate1} VALUES (${test_num}, '${date_num}', ${index}, ${index}, ${index}, ${index})
INSERT INTO ${tableDuplicate1} VALUES (0, 99)
"""
}
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableDuplicate1} VALUES (0, 99, '${date_num}')
INSERT INTO ${keywordTableName} VALUES (${test_num}, ${index})
"""
}
for (int index = 0; index < insert_num; index++) {
Expand All @@ -265,6 +256,7 @@ suite("test_db_sync") {
"""
}

sql "sync"
assertTrue(checkShowTimesOf("SHOW CREATE TABLE TEST_${context.dbName}.${tableUnique1}",
exist, 30, "target"))
assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique1} WHERE test=${test_num}",
Expand All @@ -291,6 +283,7 @@ suite("test_db_sync") {
sql "DROP TABLE ${tableDuplicate1}"
sql "DROP TABLE ${keywordTableName}"

sql "sync"
assertTrue(checkShowTimesOf("SHOW TABLES LIKE '${tableUnique1}'",
notExist, 30, "target"))
assertTrue(checkShowTimesOf("SHOW TABLES LIKE '${tableAggregate1}'",
Expand All @@ -313,10 +306,11 @@ suite("test_db_sync") {
test_num = 4
for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}')
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index})
"""
}

sql "sync"
assertTrue(!checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}",
insert_num, 3))

Expand All @@ -328,6 +322,7 @@ suite("test_db_sync") {
op "post"
result response
}
sql "sync"
assertTrue(checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}",
insert_num, 30))

Expand Down Expand Up @@ -357,6 +352,7 @@ suite("test_db_sync") {
assertTrue(desynced)
}

sql "sync"
checkDesynced(tableUnique0)
checkDesynced(tableAggregate0)
checkDesynced(tableDuplicate0)
Expand All @@ -375,10 +371,11 @@ suite("test_db_sync") {

for (int index = 0; index < insert_num; index++) {
sql """
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index}, '${date_num}')
INSERT INTO ${tableUnique0} VALUES (${test_num}, ${index})
"""
}

sql "sync"
assertTrue(!checkSelectTimesOf("SELECT * FROM ${tableUnique0} WHERE test=${test_num}",
insert_num, 5))
}
3 changes: 3 additions & 0 deletions regression-test/suites/table-sync/test_bitmap_index.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ suite("test_bitmap_index") {
} else {
res = target_sql "${sqlString}"
}

logger.info("res: ${res}")

if (myClosure.call(res)) {
ret = true
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ suite("test_bloomfilter_index") {
} else {
res = target_sql "${sqlString}"
}
logger.info("res: ${res}")
if (myClosure.call(res)) {
ret = true
}
Expand Down
Loading