From a17ef300bdf67e0a4ea42054b3186e3bbf65a7aa Mon Sep 17 00:00:00 2001 From: w41ter Date: Thu, 8 Aug 2024 10:37:01 +0800 Subject: [PATCH] Restore snapshot with clean_restore during fullsync

During a full sync the snapshot is now restored with clean_restore enabled, so tables and partitions that already exist on the downstream cluster are dropped by the restore; partial sync keeps clean_restore disabled and leaves them untouched. The flag is passed to the FE through the new CleanRestore field of TRestoreSnapshotRequest, and a new regression suite (db-sync-clean-restore) covers the behavior.

--- pkg/ccr/job.go | 6 +- pkg/rpc/fe.go | 27 +- .../backendservice/BackendService.go | 225 +++ .../backendservice/k-BackendService.go | 153 +++ pkg/rpc/kitex_gen/descriptors/Descriptors.go | 5 + .../frontendservice/FrontendService.go | 1217 ++++++++++++++++- .../frontendservice/k-FrontendService.go | 822 ++++++++++- .../PaloInternalService.go | 504 +++++++ .../k-PaloInternalService.go | 364 +++++ pkg/rpc/kitex_gen/plannodes/PlanNodes.go | 122 +- pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go | 75 + pkg/rpc/thrift/AgentService.thrift | 2 +- pkg/rpc/thrift/BackendService.thrift | 3 + pkg/rpc/thrift/Descriptors.thrift | 3 +- pkg/rpc/thrift/FrontendService.thrift | 19 +- pkg/rpc/thrift/PaloInternalService.thrift | 22 +- pkg/rpc/thrift/PlanNodes.thrift | 1 + .../test_db_sync_clean_restore.groovy | 261 ++++ 18 files changed, 3702 insertions(+), 129 deletions(-) create mode 100644 regression-test/suites/db-sync-clean-restore/test_db_sync_clean_restore.groovy diff --git a/pkg/ccr/job.go b/pkg/ccr/job.go index d5a52aa5..cd8315b7 100644 --- a/pkg/ccr/job.go +++ b/pkg/ccr/job.go @@ -410,7 +410,8 @@ func (j *Job) partialSync() error { } tableRefs = append(tableRefs, tableRef) } - restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, restoreSnapshotName, snapshotResp) + cleanRestore := false // DO NOT drop existing tables and partitions + restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, restoreSnapshotName, snapshotResp, cleanRestore) if err != nil { return err } @@ -608,7 +609,8 @@ func (j *Job) fullSync() error { } tableRefs = append(tableRefs, tableRef) } - restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, restoreSnapshotName, snapshotResp) + cleanRestore := true // drop existing tables and partitions. 
+ restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, restoreSnapshotName, snapshotResp, cleanRestore) if err != nil { return err } diff --git a/pkg/rpc/fe.go b/pkg/rpc/fe.go index 9d9302b3..b14743d2 100644 --- a/pkg/rpc/fe.go +++ b/pkg/rpc/fe.go @@ -78,7 +78,7 @@ type IFeRpc interface { GetBinlog(*base.Spec, int64) (*festruct.TGetBinlogResult_, error) GetBinlogLag(*base.Spec, int64) (*festruct.TGetBinlogLagResult_, error) GetSnapshot(*base.Spec, string) (*festruct.TGetSnapshotResult_, error) - RestoreSnapshot(*base.Spec, []*festruct.TTableRef, string, *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) + RestoreSnapshot(*base.Spec, []*festruct.TTableRef, string, *festruct.TGetSnapshotResult_, bool) (*festruct.TRestoreSnapshotResult_, error) GetMasterToken(*base.Spec) (*festruct.TGetMasterTokenResult_, error) GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) @@ -384,10 +384,10 @@ func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string) (*festruct.TGet return convertResult[festruct.TGetSnapshotResult_](result, err) } -func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) { +func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_, cleanRestore bool) (*festruct.TRestoreSnapshotResult_, error) { // return rpc.masterClient.RestoreSnapshot(spec, tableRefs, label, snapshotResult) caller := func(client IFeRpc) (resultType, error) { - return client.RestoreSnapshot(spec, tableRefs, label, snapshotResult) + return client.RestoreSnapshot(spec, tableRefs, label, snapshotResult, cleanRestore) } result, err := rpc.callWithMasterRedirect(caller) return convertResult[festruct.TRestoreSnapshotResult_](result, err) @@ -664,7 +664,7 @@ func (rpc *singleFeClient) GetSnapshot(spec *base.Spec, labelName string) (*fest // } // // Restore Snapshot rpc -func (rpc *singleFeClient) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) { +func (rpc *singleFeClient) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_, cleanRestore bool) (*festruct.TRestoreSnapshotResult_, error) { // NOTE: ignore meta, because it's too large log.Debugf("Call RestoreSnapshot, addr: %s, spec: %s", rpc.Address(), spec) @@ -673,19 +673,20 @@ func (rpc *singleFeClient) RestoreSnapshot(spec *base.Spec, tableRefs []*festruc properties := make(map[string]string) properties["reserve_replica"] = "true" req := &festruct.TRestoreSnapshotRequest{ - Table: &spec.Table, - LabelName: &label, - RepoName: &repoName, - TableRefs: tableRefs, - Properties: properties, - Meta: snapshotResult.GetMeta(), - JobInfo: snapshotResult.GetJobInfo(), + Table: &spec.Table, + LabelName: &label, + RepoName: &repoName, + TableRefs: tableRefs, + Properties: properties, + Meta: snapshotResult.GetMeta(), + JobInfo: snapshotResult.GetJobInfo(), + CleanRestore: &cleanRestore, } setAuthInfo(req, spec) // NOTE: ignore meta, because it's too large - log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v", - req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties) + 
log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v, clean restore: %v", + req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties, cleanRestore) if resp, err := client.RestoreSnapshot(context.Background(), req); err != nil { return nil, xerror.Wrapf(err, xerror.RPC, "RestoreSnapshot failed") } else { diff --git a/pkg/rpc/kitex_gen/backendservice/BackendService.go b/pkg/rpc/kitex_gen/backendservice/BackendService.go index abfb1165..26bbf661 100644 --- a/pkg/rpc/kitex_gen/backendservice/BackendService.go +++ b/pkg/rpc/kitex_gen/backendservice/BackendService.go @@ -11559,6 +11559,9 @@ type TWorkloadGroupInfo struct { MinRemoteScanThreadNum *int32 `thrift:"min_remote_scan_thread_num,11,optional" frugal:"11,optional,i32" json:"min_remote_scan_thread_num,omitempty"` SpillThresholdLowWatermark *int32 `thrift:"spill_threshold_low_watermark,12,optional" frugal:"12,optional,i32" json:"spill_threshold_low_watermark,omitempty"` SpillThresholdHighWatermark *int32 `thrift:"spill_threshold_high_watermark,13,optional" frugal:"13,optional,i32" json:"spill_threshold_high_watermark,omitempty"` + ReadBytesPerSecond *int64 `thrift:"read_bytes_per_second,14,optional" frugal:"14,optional,i64" json:"read_bytes_per_second,omitempty"` + RemoteReadBytesPerSecond *int64 `thrift:"remote_read_bytes_per_second,15,optional" frugal:"15,optional,i64" json:"remote_read_bytes_per_second,omitempty"` + Tag *string `thrift:"tag,16,optional" frugal:"16,optional,string" json:"tag,omitempty"` } func NewTWorkloadGroupInfo() *TWorkloadGroupInfo { @@ -11684,6 +11687,33 @@ func (p *TWorkloadGroupInfo) GetSpillThresholdHighWatermark() (v int32) { } return *p.SpillThresholdHighWatermark } + +var TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetReadBytesPerSecond() (v int64) { + if !p.IsSetReadBytesPerSecond() { + return TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT + } + return *p.ReadBytesPerSecond +} + +var TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT int64 + +func (p *TWorkloadGroupInfo) GetRemoteReadBytesPerSecond() (v int64) { + if !p.IsSetRemoteReadBytesPerSecond() { + return TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT + } + return *p.RemoteReadBytesPerSecond +} + +var TWorkloadGroupInfo_Tag_DEFAULT string + +func (p *TWorkloadGroupInfo) GetTag() (v string) { + if !p.IsSetTag() { + return TWorkloadGroupInfo_Tag_DEFAULT + } + return *p.Tag +} func (p *TWorkloadGroupInfo) SetId(val *int64) { p.Id = val } @@ -11723,6 +11753,15 @@ func (p *TWorkloadGroupInfo) SetSpillThresholdLowWatermark(val *int32) { func (p *TWorkloadGroupInfo) SetSpillThresholdHighWatermark(val *int32) { p.SpillThresholdHighWatermark = val } +func (p *TWorkloadGroupInfo) SetReadBytesPerSecond(val *int64) { + p.ReadBytesPerSecond = val +} +func (p *TWorkloadGroupInfo) SetRemoteReadBytesPerSecond(val *int64) { + p.RemoteReadBytesPerSecond = val +} +func (p *TWorkloadGroupInfo) SetTag(val *string) { + p.Tag = val +} var fieldIDToName_TWorkloadGroupInfo = map[int16]string{ 1: "id", @@ -11738,6 +11777,9 @@ var fieldIDToName_TWorkloadGroupInfo = map[int16]string{ 11: "min_remote_scan_thread_num", 12: "spill_threshold_low_watermark", 13: "spill_threshold_high_watermark", + 14: "read_bytes_per_second", + 15: "remote_read_bytes_per_second", + 16: "tag", } func (p *TWorkloadGroupInfo) IsSetId() bool { @@ -11792,6 +11834,18 @@ func (p *TWorkloadGroupInfo) IsSetSpillThresholdHighWatermark() bool { return p.SpillThresholdHighWatermark != nil } +func (p 
*TWorkloadGroupInfo) IsSetReadBytesPerSecond() bool { + return p.ReadBytesPerSecond != nil +} + +func (p *TWorkloadGroupInfo) IsSetRemoteReadBytesPerSecond() bool { + return p.RemoteReadBytesPerSecond != nil +} + +func (p *TWorkloadGroupInfo) IsSetTag() bool { + return p.Tag != nil +} + func (p *TWorkloadGroupInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -11915,6 +11969,30 @@ func (p *TWorkloadGroupInfo) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 14: + if fieldTypeId == thrift.I64 { + if err = p.ReadField14(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 15: + if fieldTypeId == thrift.I64 { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 16: + if fieldTypeId == thrift.STRING { + if err = p.ReadField16(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -12087,6 +12165,39 @@ func (p *TWorkloadGroupInfo) ReadField13(iprot thrift.TProtocol) error { p.SpillThresholdHighWatermark = _field return nil } +func (p *TWorkloadGroupInfo) ReadField14(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReadBytesPerSecond = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField15(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.RemoteReadBytesPerSecond = _field + return nil +} +func (p *TWorkloadGroupInfo) ReadField16(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Tag = _field + return nil +} func (p *TWorkloadGroupInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -12146,6 +12257,18 @@ func (p *TWorkloadGroupInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 13 goto WriteFieldError } + if err = p.writeField14(oprot); err != nil { + fieldId = 14 + goto WriteFieldError + } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } + if err = p.writeField16(oprot); err != nil { + fieldId = 16 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12411,6 +12534,63 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } +func (p *TWorkloadGroupInfo) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetReadBytesPerSecond() { + if err = oprot.WriteFieldBegin("read_bytes_per_second", thrift.I64, 14); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReadBytesPerSecond); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReadBytesPerSecond() { + if err = 
oprot.WriteFieldBegin("remote_read_bytes_per_second", thrift.I64, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.RemoteReadBytesPerSecond); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + +func (p *TWorkloadGroupInfo) writeField16(oprot thrift.TProtocol) (err error) { + if p.IsSetTag() { + if err = oprot.WriteFieldBegin("tag", thrift.STRING, 16); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Tag); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err) +} + func (p *TWorkloadGroupInfo) String() string { if p == nil { return "" @@ -12464,6 +12644,15 @@ func (p *TWorkloadGroupInfo) DeepEqual(ano *TWorkloadGroupInfo) bool { if !p.Field13DeepEqual(ano.SpillThresholdHighWatermark) { return false } + if !p.Field14DeepEqual(ano.ReadBytesPerSecond) { + return false + } + if !p.Field15DeepEqual(ano.RemoteReadBytesPerSecond) { + return false + } + if !p.Field16DeepEqual(ano.Tag) { + return false + } return true } @@ -12623,6 +12812,42 @@ func (p *TWorkloadGroupInfo) Field13DeepEqual(src *int32) bool { } return true } +func (p *TWorkloadGroupInfo) Field14DeepEqual(src *int64) bool { + + if p.ReadBytesPerSecond == src { + return true + } else if p.ReadBytesPerSecond == nil || src == nil { + return false + } + if *p.ReadBytesPerSecond != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field15DeepEqual(src *int64) bool { + + if p.RemoteReadBytesPerSecond == src { + return true + } else if p.RemoteReadBytesPerSecond == nil || src == nil { + return false + } + if *p.RemoteReadBytesPerSecond != *src { + return false + } + return true +} +func (p *TWorkloadGroupInfo) Field16DeepEqual(src *string) bool { + + if p.Tag == src { + return true + } else if p.Tag == nil || src == nil { + return false + } + if strings.Compare(*p.Tag, *src) != 0 { + return false + } + return true +} type TWorkloadCondition struct { MetricName *TWorkloadMetricType `thrift:"metric_name,1,optional" frugal:"1,optional,TWorkloadMetricType" json:"metric_name,omitempty"` diff --git a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go index c71a244f..4a23b77b 100644 --- a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go +++ b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go @@ -8856,6 +8856,48 @@ func (p *TWorkloadGroupInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 14: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField14(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 15: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + 
case 16: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField16(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -9060,6 +9102,45 @@ func (p *TWorkloadGroupInfo) FastReadField13(buf []byte) (int, error) { return offset, nil } +func (p *TWorkloadGroupInfo) FastReadField14(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReadBytesPerSecond = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField15(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.RemoteReadBytesPerSecond = &v + + } + return offset, nil +} + +func (p *TWorkloadGroupInfo) FastReadField16(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Tag = &v + + } + return offset, nil +} + // for compatibility func (p *TWorkloadGroupInfo) FastWrite(buf []byte) int { return 0 @@ -9080,8 +9161,11 @@ func (p *TWorkloadGroupInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bi offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField13(buf[offset:], binaryWriter) + offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField16(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -9105,6 +9189,9 @@ func (p *TWorkloadGroupInfo) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field13Length() + l += p.field14Length() + l += p.field15Length() + l += p.field16Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -9254,6 +9341,39 @@ func (p *TWorkloadGroupInfo) fastWriteField13(buf []byte, binaryWriter bthrift.B return offset } +func (p *TWorkloadGroupInfo) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReadBytesPerSecond() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_bytes_per_second", thrift.I64, 14) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReadBytesPerSecond) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRemoteReadBytesPerSecond() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_read_bytes_per_second", thrift.I64, 15) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteReadBytesPerSecond) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TWorkloadGroupInfo) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTag() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tag", thrift.STRING, 16) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tag) + + 
offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TWorkloadGroupInfo) field1Length() int { l := 0 if p.IsSetId() { @@ -9397,6 +9517,39 @@ func (p *TWorkloadGroupInfo) field13Length() int { return l } +func (p *TWorkloadGroupInfo) field14Length() int { + l := 0 + if p.IsSetReadBytesPerSecond() { + l += bthrift.Binary.FieldBeginLength("read_bytes_per_second", thrift.I64, 14) + l += bthrift.Binary.I64Length(*p.ReadBytesPerSecond) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field15Length() int { + l := 0 + if p.IsSetRemoteReadBytesPerSecond() { + l += bthrift.Binary.FieldBeginLength("remote_read_bytes_per_second", thrift.I64, 15) + l += bthrift.Binary.I64Length(*p.RemoteReadBytesPerSecond) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TWorkloadGroupInfo) field16Length() int { + l := 0 + if p.IsSetTag() { + l += bthrift.Binary.FieldBeginLength("tag", thrift.STRING, 16) + l += bthrift.Binary.StringLengthNocopy(*p.Tag) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TWorkloadCondition) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/descriptors/Descriptors.go b/pkg/rpc/kitex_gen/descriptors/Descriptors.go index daf6fd5e..d412605f 100644 --- a/pkg/rpc/kitex_gen/descriptors/Descriptors.go +++ b/pkg/rpc/kitex_gen/descriptors/Descriptors.go @@ -137,6 +137,7 @@ const ( TSchemaTableType_SCH_PROCS_PRIV TSchemaTableType = 45 TSchemaTableType_SCH_WORKLOAD_POLICY TSchemaTableType = 46 TSchemaTableType_SCH_TABLE_OPTIONS TSchemaTableType = 47 + TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES TSchemaTableType = 48 ) func (p TSchemaTableType) String() string { @@ -237,6 +238,8 @@ func (p TSchemaTableType) String() string { return "SCH_WORKLOAD_POLICY" case TSchemaTableType_SCH_TABLE_OPTIONS: return "SCH_TABLE_OPTIONS" + case TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES: + return "SCH_WORKLOAD_GROUP_PRIVILEGES" } return "" } @@ -339,6 +342,8 @@ func TSchemaTableTypeFromString(s string) (TSchemaTableType, error) { return TSchemaTableType_SCH_WORKLOAD_POLICY, nil case "SCH_TABLE_OPTIONS": return TSchemaTableType_SCH_TABLE_OPTIONS, nil + case "SCH_WORKLOAD_GROUP_PRIVILEGES": + return TSchemaTableType_SCH_WORKLOAD_GROUP_PRIVILEGES, nil } return TSchemaTableType(0), fmt.Errorf("not a valid TSchemaTableType string") } diff --git a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go index c32dcf5f..478047d9 100644 --- a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go @@ -310,12 +310,13 @@ func (p *TFrontendPingFrontendStatusCode) Value() (driver.Value, error) { type TSchemaTableName int64 const ( - TSchemaTableName_METADATA_TABLE TSchemaTableName = 1 - TSchemaTableName_ACTIVE_QUERIES TSchemaTableName = 2 - TSchemaTableName_WORKLOAD_GROUPS TSchemaTableName = 3 - TSchemaTableName_ROUTINES_INFO TSchemaTableName = 4 - TSchemaTableName_WORKLOAD_SCHEDULE_POLICY TSchemaTableName = 5 - TSchemaTableName_TABLE_OPTIONS TSchemaTableName = 6 + TSchemaTableName_METADATA_TABLE TSchemaTableName = 1 + TSchemaTableName_ACTIVE_QUERIES TSchemaTableName = 2 + TSchemaTableName_WORKLOAD_GROUPS TSchemaTableName = 3 + TSchemaTableName_ROUTINES_INFO TSchemaTableName = 4 + TSchemaTableName_WORKLOAD_SCHEDULE_POLICY TSchemaTableName = 5 + TSchemaTableName_TABLE_OPTIONS TSchemaTableName = 6 + TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES TSchemaTableName = 
7 ) func (p TSchemaTableName) String() string { @@ -332,6 +333,8 @@ func (p TSchemaTableName) String() string { return "WORKLOAD_SCHEDULE_POLICY" case TSchemaTableName_TABLE_OPTIONS: return "TABLE_OPTIONS" + case TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES: + return "WORKLOAD_GROUP_PRIVILEGES" } return "" } @@ -350,6 +353,8 @@ func TSchemaTableNameFromString(s string) (TSchemaTableName, error) { return TSchemaTableName_WORKLOAD_SCHEDULE_POLICY, nil case "TABLE_OPTIONS": return TSchemaTableName_TABLE_OPTIONS, nil + case "WORKLOAD_GROUP_PRIVILEGES": + return TSchemaTableName_WORKLOAD_GROUP_PRIVILEGES, nil } return TSchemaTableName(0), fmt.Errorf("not a valid TSchemaTableName string") } @@ -4087,7 +4092,7 @@ func (p *TShowVariableRequest) Field2DeepEqual(src types.TVarType) bool { } type TShowVariableResult_ struct { - Variables map[string]string `thrift:"variables,1,required" frugal:"1,required,map" json:"variables"` + Variables [][]string `thrift:"variables,1,required" frugal:"1,required,list>" json:"variables"` } func NewTShowVariableResult_() *TShowVariableResult_ { @@ -4097,10 +4102,10 @@ func NewTShowVariableResult_() *TShowVariableResult_ { func (p *TShowVariableResult_) InitDefault() { } -func (p *TShowVariableResult_) GetVariables() (v map[string]string) { +func (p *TShowVariableResult_) GetVariables() (v [][]string) { return p.Variables } -func (p *TShowVariableResult_) SetVariables(val map[string]string) { +func (p *TShowVariableResult_) SetVariables(val [][]string) { p.Variables = val } @@ -4129,7 +4134,7 @@ func (p *TShowVariableResult_) Read(iprot thrift.TProtocol) (err error) { switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError } @@ -4173,29 +4178,35 @@ RequiredFieldNotSetError: } func (p *TShowVariableResult_) ReadField1(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() + _, size, err := iprot.ReadListBegin() if err != nil { return err } - _field := make(map[string]string, size) + _field := make([][]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, err := iprot.ReadString(); err != nil { + _, size, err := iprot.ReadListBegin() + if err != nil { return err - } else { - _key = v } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { - var _val string - if v, err := iprot.ReadString(); err != nil { + var _elem1 string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem1 = v + } + + _elem = append(_elem, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { return err - } else { - _val = v } - _field[_key] = _val + _field = append(_field, _elem) } - if err := iprot.ReadMapEnd(); err != nil { + if err := iprot.ReadListEnd(); err != nil { return err } p.Variables = _field @@ -4231,21 +4242,26 @@ WriteStructEndError: } func (p *TShowVariableResult_) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("variables", thrift.MAP, 1); err != nil { + if err = oprot.WriteFieldBegin("variables", thrift.LIST, 1); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Variables)); err != nil { + if err := oprot.WriteListBegin(thrift.LIST, len(p.Variables)); err != nil { return err } - for k, v := range p.Variables { - if err := oprot.WriteString(k); err != nil { + for _, v := range p.Variables { + if err := oprot.WriteListBegin(thrift.STRING, len(v)); err != nil { return err } - if err := oprot.WriteString(v); err != nil { 
+ for _, v := range v { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { return err } } - if err := oprot.WriteMapEnd(); err != nil { + if err := oprot.WriteListEnd(); err != nil { return err } if err = oprot.WriteFieldEnd(); err != nil { @@ -4278,16 +4294,22 @@ func (p *TShowVariableResult_) DeepEqual(ano *TShowVariableResult_) bool { return true } -func (p *TShowVariableResult_) Field1DeepEqual(src map[string]string) bool { +func (p *TShowVariableResult_) Field1DeepEqual(src [][]string) bool { if len(p.Variables) != len(src) { return false } - for k, v := range p.Variables { - _src := src[k] - if strings.Compare(v, _src) != 0 { + for i, v := range p.Variables { + _src := src[i] + if len(v) != len(_src) { return false } + for i, v := range v { + _src1 := _src[i] + if strings.Compare(v, _src1) != 0 { + return false + } + } } return true } @@ -19360,6 +19382,635 @@ func (p *TTxnLoadInfo) Field6DeepEqual(src []*TSubTxnInfo) bool { return true } +type TGroupCommitInfo struct { + GetGroupCommitLoadBeId *bool `thrift:"getGroupCommitLoadBeId,1,optional" frugal:"1,optional,bool" json:"getGroupCommitLoadBeId,omitempty"` + GroupCommitLoadTableId *int64 `thrift:"groupCommitLoadTableId,2,optional" frugal:"2,optional,i64" json:"groupCommitLoadTableId,omitempty"` + Cluster *string `thrift:"cluster,3,optional" frugal:"3,optional,string" json:"cluster,omitempty"` + IsCloud *bool `thrift:"isCloud,4,optional" frugal:"4,optional,bool" json:"isCloud,omitempty"` + UpdateLoadData *bool `thrift:"updateLoadData,5,optional" frugal:"5,optional,bool" json:"updateLoadData,omitempty"` + TableId *int64 `thrift:"tableId,6,optional" frugal:"6,optional,i64" json:"tableId,omitempty"` + ReceiveData *int64 `thrift:"receiveData,7,optional" frugal:"7,optional,i64" json:"receiveData,omitempty"` +} + +func NewTGroupCommitInfo() *TGroupCommitInfo { + return &TGroupCommitInfo{} +} + +func (p *TGroupCommitInfo) InitDefault() { +} + +var TGroupCommitInfo_GetGroupCommitLoadBeId_DEFAULT bool + +func (p *TGroupCommitInfo) GetGetGroupCommitLoadBeId() (v bool) { + if !p.IsSetGetGroupCommitLoadBeId() { + return TGroupCommitInfo_GetGroupCommitLoadBeId_DEFAULT + } + return *p.GetGroupCommitLoadBeId +} + +var TGroupCommitInfo_GroupCommitLoadTableId_DEFAULT int64 + +func (p *TGroupCommitInfo) GetGroupCommitLoadTableId() (v int64) { + if !p.IsSetGroupCommitLoadTableId() { + return TGroupCommitInfo_GroupCommitLoadTableId_DEFAULT + } + return *p.GroupCommitLoadTableId +} + +var TGroupCommitInfo_Cluster_DEFAULT string + +func (p *TGroupCommitInfo) GetCluster() (v string) { + if !p.IsSetCluster() { + return TGroupCommitInfo_Cluster_DEFAULT + } + return *p.Cluster +} + +var TGroupCommitInfo_IsCloud_DEFAULT bool + +func (p *TGroupCommitInfo) GetIsCloud() (v bool) { + if !p.IsSetIsCloud() { + return TGroupCommitInfo_IsCloud_DEFAULT + } + return *p.IsCloud +} + +var TGroupCommitInfo_UpdateLoadData_DEFAULT bool + +func (p *TGroupCommitInfo) GetUpdateLoadData() (v bool) { + if !p.IsSetUpdateLoadData() { + return TGroupCommitInfo_UpdateLoadData_DEFAULT + } + return *p.UpdateLoadData +} + +var TGroupCommitInfo_TableId_DEFAULT int64 + +func (p *TGroupCommitInfo) GetTableId() (v int64) { + if !p.IsSetTableId() { + return TGroupCommitInfo_TableId_DEFAULT + } + return *p.TableId +} + +var TGroupCommitInfo_ReceiveData_DEFAULT int64 + +func (p *TGroupCommitInfo) GetReceiveData() (v int64) { + if !p.IsSetReceiveData() { + return TGroupCommitInfo_ReceiveData_DEFAULT + } + return *p.ReceiveData +} 
+func (p *TGroupCommitInfo) SetGetGroupCommitLoadBeId(val *bool) { + p.GetGroupCommitLoadBeId = val +} +func (p *TGroupCommitInfo) SetGroupCommitLoadTableId(val *int64) { + p.GroupCommitLoadTableId = val +} +func (p *TGroupCommitInfo) SetCluster(val *string) { + p.Cluster = val +} +func (p *TGroupCommitInfo) SetIsCloud(val *bool) { + p.IsCloud = val +} +func (p *TGroupCommitInfo) SetUpdateLoadData(val *bool) { + p.UpdateLoadData = val +} +func (p *TGroupCommitInfo) SetTableId(val *int64) { + p.TableId = val +} +func (p *TGroupCommitInfo) SetReceiveData(val *int64) { + p.ReceiveData = val +} + +var fieldIDToName_TGroupCommitInfo = map[int16]string{ + 1: "getGroupCommitLoadBeId", + 2: "groupCommitLoadTableId", + 3: "cluster", + 4: "isCloud", + 5: "updateLoadData", + 6: "tableId", + 7: "receiveData", +} + +func (p *TGroupCommitInfo) IsSetGetGroupCommitLoadBeId() bool { + return p.GetGroupCommitLoadBeId != nil +} + +func (p *TGroupCommitInfo) IsSetGroupCommitLoadTableId() bool { + return p.GroupCommitLoadTableId != nil +} + +func (p *TGroupCommitInfo) IsSetCluster() bool { + return p.Cluster != nil +} + +func (p *TGroupCommitInfo) IsSetIsCloud() bool { + return p.IsCloud != nil +} + +func (p *TGroupCommitInfo) IsSetUpdateLoadData() bool { + return p.UpdateLoadData != nil +} + +func (p *TGroupCommitInfo) IsSetTableId() bool { + return p.TableId != nil +} + +func (p *TGroupCommitInfo) IsSetReceiveData() bool { + return p.ReceiveData != nil +} + +func (p *TGroupCommitInfo) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGroupCommitInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGroupCommitInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.GetGroupCommitLoadBeId = _field + return nil +} +func (p *TGroupCommitInfo) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommitLoadTableId = _field + return nil +} +func (p *TGroupCommitInfo) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Cluster = _field + return nil +} +func (p *TGroupCommitInfo) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsCloud = _field + return nil +} +func (p *TGroupCommitInfo) ReadField5(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.UpdateLoadData = _field + return nil +} +func (p *TGroupCommitInfo) ReadField6(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TableId = _field + return nil +} +func (p *TGroupCommitInfo) ReadField7(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReceiveData = _field + return nil +} + +func (p *TGroupCommitInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TGroupCommitInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) 
+WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetGetGroupCommitLoadBeId() { + if err = oprot.WriteFieldBegin("getGroupCommitLoadBeId", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.GetGroupCommitLoadBeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitLoadTableId() { + if err = oprot.WriteFieldBegin("groupCommitLoadTableId", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.GroupCommitLoadTableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCluster() { + if err = oprot.WriteFieldBegin("cluster", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Cluster); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCloud() { + if err = oprot.WriteFieldBegin("isCloud", thrift.BOOL, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsCloud); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetUpdateLoadData() { + if err = oprot.WriteFieldBegin("updateLoadData", thrift.BOOL, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.UpdateLoadData); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTableId() { + if err = oprot.WriteFieldBegin("tableId", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TableId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: 
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TGroupCommitInfo) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetReceiveData() { + if err = oprot.WriteFieldBegin("receiveData", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReceiveData); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TGroupCommitInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGroupCommitInfo(%+v)", *p) + +} + +func (p *TGroupCommitInfo) DeepEqual(ano *TGroupCommitInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.GetGroupCommitLoadBeId) { + return false + } + if !p.Field2DeepEqual(ano.GroupCommitLoadTableId) { + return false + } + if !p.Field3DeepEqual(ano.Cluster) { + return false + } + if !p.Field4DeepEqual(ano.IsCloud) { + return false + } + if !p.Field5DeepEqual(ano.UpdateLoadData) { + return false + } + if !p.Field6DeepEqual(ano.TableId) { + return false + } + if !p.Field7DeepEqual(ano.ReceiveData) { + return false + } + return true +} + +func (p *TGroupCommitInfo) Field1DeepEqual(src *bool) bool { + + if p.GetGroupCommitLoadBeId == src { + return true + } else if p.GetGroupCommitLoadBeId == nil || src == nil { + return false + } + if *p.GetGroupCommitLoadBeId != *src { + return false + } + return true +} +func (p *TGroupCommitInfo) Field2DeepEqual(src *int64) bool { + + if p.GroupCommitLoadTableId == src { + return true + } else if p.GroupCommitLoadTableId == nil || src == nil { + return false + } + if *p.GroupCommitLoadTableId != *src { + return false + } + return true +} +func (p *TGroupCommitInfo) Field3DeepEqual(src *string) bool { + + if p.Cluster == src { + return true + } else if p.Cluster == nil || src == nil { + return false + } + if strings.Compare(*p.Cluster, *src) != 0 { + return false + } + return true +} +func (p *TGroupCommitInfo) Field4DeepEqual(src *bool) bool { + + if p.IsCloud == src { + return true + } else if p.IsCloud == nil || src == nil { + return false + } + if *p.IsCloud != *src { + return false + } + return true +} +func (p *TGroupCommitInfo) Field5DeepEqual(src *bool) bool { + + if p.UpdateLoadData == src { + return true + } else if p.UpdateLoadData == nil || src == nil { + return false + } + if *p.UpdateLoadData != *src { + return false + } + return true +} +func (p *TGroupCommitInfo) Field6DeepEqual(src *int64) bool { + + if p.TableId == src { + return true + } else if p.TableId == nil || src == nil { + return false + } + if *p.TableId != *src { + return false + } + return true +} +func (p *TGroupCommitInfo) Field7DeepEqual(src *int64) bool { + + if p.ReceiveData == src { + return true + } else if p.ReceiveData == nil || src == nil { + return false + } + if *p.ReceiveData != *src { + return false + } + return true +} + type TMasterOpRequest struct { User string `thrift:"user,1,required" frugal:"1,required,string" json:"user"` Db string `thrift:"db,2,required" frugal:"2,required,string" json:"db"` @@ -19390,6 +20041,7 @@ type TMasterOpRequest struct { CancelQeury *bool 
`thrift:"cancel_qeury,27,optional" frugal:"27,optional,bool" json:"cancel_qeury,omitempty"` UserVariables map[string]*exprs.TExprNode `thrift:"user_variables,28,optional" frugal:"28,optional,map" json:"user_variables,omitempty"` TxnLoadInfo *TTxnLoadInfo `thrift:"txnLoadInfo,29,optional" frugal:"29,optional,TTxnLoadInfo" json:"txnLoadInfo,omitempty"` + GroupCommitInfo *TGroupCommitInfo `thrift:"groupCommitInfo,30,optional" frugal:"30,optional,TGroupCommitInfo" json:"groupCommitInfo,omitempty"` CloudCluster *string `thrift:"cloud_cluster,1000,optional" frugal:"1000,optional,string" json:"cloud_cluster,omitempty"` NoAuth *bool `thrift:"noAuth,1001,optional" frugal:"1001,optional,bool" json:"noAuth,omitempty"` } @@ -19647,6 +20299,15 @@ func (p *TMasterOpRequest) GetTxnLoadInfo() (v *TTxnLoadInfo) { return p.TxnLoadInfo } +var TMasterOpRequest_GroupCommitInfo_DEFAULT *TGroupCommitInfo + +func (p *TMasterOpRequest) GetGroupCommitInfo() (v *TGroupCommitInfo) { + if !p.IsSetGroupCommitInfo() { + return TMasterOpRequest_GroupCommitInfo_DEFAULT + } + return p.GroupCommitInfo +} + var TMasterOpRequest_CloudCluster_DEFAULT string func (p *TMasterOpRequest) GetCloudCluster() (v string) { @@ -19751,6 +20412,9 @@ func (p *TMasterOpRequest) SetUserVariables(val map[string]*exprs.TExprNode) { func (p *TMasterOpRequest) SetTxnLoadInfo(val *TTxnLoadInfo) { p.TxnLoadInfo = val } +func (p *TMasterOpRequest) SetGroupCommitInfo(val *TGroupCommitInfo) { + p.GroupCommitInfo = val +} func (p *TMasterOpRequest) SetCloudCluster(val *string) { p.CloudCluster = val } @@ -19788,6 +20452,7 @@ var fieldIDToName_TMasterOpRequest = map[int16]string{ 27: "cancel_qeury", 28: "user_variables", 29: "txnLoadInfo", + 30: "groupCommitInfo", 1000: "cloud_cluster", 1001: "noAuth", } @@ -19896,6 +20561,10 @@ func (p *TMasterOpRequest) IsSetTxnLoadInfo() bool { return p.TxnLoadInfo != nil } +func (p *TMasterOpRequest) IsSetGroupCommitInfo() bool { + return p.GroupCommitInfo != nil +} + func (p *TMasterOpRequest) IsSetCloudCluster() bool { return p.CloudCluster != nil } @@ -20161,6 +20830,14 @@ func (p *TMasterOpRequest) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 30: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField30(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 1000: if fieldTypeId == thrift.STRING { if err = p.ReadField1000(iprot); err != nil { @@ -20580,6 +21257,14 @@ func (p *TMasterOpRequest) ReadField29(iprot thrift.TProtocol) error { p.TxnLoadInfo = _field return nil } +func (p *TMasterOpRequest) ReadField30(iprot thrift.TProtocol) error { + _field := NewTGroupCommitInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.GroupCommitInfo = _field + return nil +} func (p *TMasterOpRequest) ReadField1000(iprot thrift.TProtocol) error { var _field *string @@ -20725,6 +21410,10 @@ func (p *TMasterOpRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 29 goto WriteFieldError } + if err = p.writeField30(oprot); err != nil { + fieldId = 30 + goto WriteFieldError + } if err = p.writeField1000(oprot); err != nil { fieldId = 1000 goto WriteFieldError @@ -21329,6 +22018,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err) } +func (p *TMasterOpRequest) writeField30(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitInfo() { + if err = 
oprot.WriteFieldBegin("groupCommitInfo", thrift.STRUCT, 30); err != nil { + goto WriteFieldBeginError + } + if err := p.GroupCommitInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 30 end error: ", p), err) +} + func (p *TMasterOpRequest) writeField1000(oprot thrift.TProtocol) (err error) { if p.IsSetCloudCluster() { if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 1000); err != nil { @@ -21468,6 +22176,9 @@ func (p *TMasterOpRequest) DeepEqual(ano *TMasterOpRequest) bool { if !p.Field29DeepEqual(ano.TxnLoadInfo) { return false } + if !p.Field30DeepEqual(ano.GroupCommitInfo) { + return false + } if !p.Field1000DeepEqual(ano.CloudCluster) { return false } @@ -21788,6 +22499,13 @@ func (p *TMasterOpRequest) Field29DeepEqual(src *TTxnLoadInfo) bool { } return true } +func (p *TMasterOpRequest) Field30DeepEqual(src *TGroupCommitInfo) bool { + + if !p.GroupCommitInfo.DeepEqual(src) { + return false + } + return true +} func (p *TMasterOpRequest) Field1000DeepEqual(src *string) bool { if p.CloudCluster == src { @@ -22700,15 +23418,16 @@ func (p *TShowResultSet) Field2DeepEqual(src [][]string) bool { } type TMasterOpResult_ struct { - MaxJournalId int64 `thrift:"maxJournalId,1,required" frugal:"1,required,i64" json:"maxJournalId"` - Packet []byte `thrift:"packet,2,required" frugal:"2,required,binary" json:"packet"` - ResultSet *TShowResultSet `thrift:"resultSet,3,optional" frugal:"3,optional,TShowResultSet" json:"resultSet,omitempty"` - QueryId *types.TUniqueId `thrift:"queryId,4,optional" frugal:"4,optional,types.TUniqueId" json:"queryId,omitempty"` - Status *string `thrift:"status,5,optional" frugal:"5,optional,string" json:"status,omitempty"` - StatusCode *int32 `thrift:"statusCode,6,optional" frugal:"6,optional,i32" json:"statusCode,omitempty"` - ErrMessage *string `thrift:"errMessage,7,optional" frugal:"7,optional,string" json:"errMessage,omitempty"` - QueryResultBufList [][]byte `thrift:"queryResultBufList,8,optional" frugal:"8,optional,list" json:"queryResultBufList,omitempty"` - TxnLoadInfo *TTxnLoadInfo `thrift:"txnLoadInfo,9,optional" frugal:"9,optional,TTxnLoadInfo" json:"txnLoadInfo,omitempty"` + MaxJournalId int64 `thrift:"maxJournalId,1,required" frugal:"1,required,i64" json:"maxJournalId"` + Packet []byte `thrift:"packet,2,required" frugal:"2,required,binary" json:"packet"` + ResultSet *TShowResultSet `thrift:"resultSet,3,optional" frugal:"3,optional,TShowResultSet" json:"resultSet,omitempty"` + QueryId *types.TUniqueId `thrift:"queryId,4,optional" frugal:"4,optional,types.TUniqueId" json:"queryId,omitempty"` + Status *string `thrift:"status,5,optional" frugal:"5,optional,string" json:"status,omitempty"` + StatusCode *int32 `thrift:"statusCode,6,optional" frugal:"6,optional,i32" json:"statusCode,omitempty"` + ErrMessage *string `thrift:"errMessage,7,optional" frugal:"7,optional,string" json:"errMessage,omitempty"` + QueryResultBufList [][]byte `thrift:"queryResultBufList,8,optional" frugal:"8,optional,list" json:"queryResultBufList,omitempty"` + TxnLoadInfo *TTxnLoadInfo `thrift:"txnLoadInfo,9,optional" frugal:"9,optional,TTxnLoadInfo" json:"txnLoadInfo,omitempty"` + GroupCommitLoadBeId *int64 `thrift:"groupCommitLoadBeId,10,optional" frugal:"10,optional,i64" 
json:"groupCommitLoadBeId,omitempty"` } func NewTMasterOpResult_() *TMasterOpResult_ { @@ -22788,6 +23507,15 @@ func (p *TMasterOpResult_) GetTxnLoadInfo() (v *TTxnLoadInfo) { } return p.TxnLoadInfo } + +var TMasterOpResult__GroupCommitLoadBeId_DEFAULT int64 + +func (p *TMasterOpResult_) GetGroupCommitLoadBeId() (v int64) { + if !p.IsSetGroupCommitLoadBeId() { + return TMasterOpResult__GroupCommitLoadBeId_DEFAULT + } + return *p.GroupCommitLoadBeId +} func (p *TMasterOpResult_) SetMaxJournalId(val int64) { p.MaxJournalId = val } @@ -22815,17 +23543,21 @@ func (p *TMasterOpResult_) SetQueryResultBufList(val [][]byte) { func (p *TMasterOpResult_) SetTxnLoadInfo(val *TTxnLoadInfo) { p.TxnLoadInfo = val } +func (p *TMasterOpResult_) SetGroupCommitLoadBeId(val *int64) { + p.GroupCommitLoadBeId = val +} var fieldIDToName_TMasterOpResult_ = map[int16]string{ - 1: "maxJournalId", - 2: "packet", - 3: "resultSet", - 4: "queryId", - 5: "status", - 6: "statusCode", - 7: "errMessage", - 8: "queryResultBufList", - 9: "txnLoadInfo", + 1: "maxJournalId", + 2: "packet", + 3: "resultSet", + 4: "queryId", + 5: "status", + 6: "statusCode", + 7: "errMessage", + 8: "queryResultBufList", + 9: "txnLoadInfo", + 10: "groupCommitLoadBeId", } func (p *TMasterOpResult_) IsSetResultSet() bool { @@ -22856,6 +23588,10 @@ func (p *TMasterOpResult_) IsSetTxnLoadInfo() bool { return p.TxnLoadInfo != nil } +func (p *TMasterOpResult_) IsSetGroupCommitLoadBeId() bool { + return p.GroupCommitLoadBeId != nil +} + func (p *TMasterOpResult_) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -22951,6 +23687,14 @@ func (p *TMasterOpResult_) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 10: + if fieldTypeId == thrift.I64 { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -23093,6 +23837,17 @@ func (p *TMasterOpResult_) ReadField9(iprot thrift.TProtocol) error { p.TxnLoadInfo = _field return nil } +func (p *TMasterOpResult_) ReadField10(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommitLoadBeId = _field + return nil +} func (p *TMasterOpResult_) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -23136,6 +23891,10 @@ func (p *TMasterOpResult_) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -23329,6 +24088,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } +func (p *TMasterOpResult_) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommitLoadBeId() { + if err = oprot.WriteFieldBegin("groupCommitLoadBeId", thrift.I64, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.GroupCommitLoadBeId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p 
*TMasterOpResult_) String() string { if p == nil { return "" @@ -23370,6 +24148,9 @@ func (p *TMasterOpResult_) DeepEqual(ano *TMasterOpResult_) bool { if !p.Field9DeepEqual(ano.TxnLoadInfo) { return false } + if !p.Field10DeepEqual(ano.GroupCommitLoadBeId) { + return false + } return true } @@ -23457,6 +24238,18 @@ func (p *TMasterOpResult_) Field9DeepEqual(src *TTxnLoadInfo) bool { } return true } +func (p *TMasterOpResult_) Field10DeepEqual(src *int64) bool { + + if p.GroupCommitLoadBeId == src { + return true + } else if p.GroupCommitLoadBeId == nil || src == nil { + return false + } + if *p.GroupCommitLoadBeId != *src { + return false + } + return true +} type TUpdateExportTaskStatusRequest struct { ProtocolVersion FrontendServiceVersion `thrift:"protocolVersion,1,required" frugal:"1,required,FrontendServiceVersion" json:"protocolVersion"` @@ -34452,6 +35245,9 @@ type TLoadTxnCommitRequest struct { Tbls []string `thrift:"tbls,15,optional" frugal:"15,optional,list" json:"tbls,omitempty"` TableId *int64 `thrift:"table_id,16,optional" frugal:"16,optional,i64" json:"table_id,omitempty"` AuthCodeUuid *string `thrift:"auth_code_uuid,17,optional" frugal:"17,optional,string" json:"auth_code_uuid,omitempty"` + GroupCommit *bool `thrift:"groupCommit,18,optional" frugal:"18,optional,bool" json:"groupCommit,omitempty"` + ReceiveBytes *int64 `thrift:"receiveBytes,19,optional" frugal:"19,optional,i64" json:"receiveBytes,omitempty"` + BackendId *int64 `thrift:"backendId,20,optional" frugal:"20,optional,i64" json:"backendId,omitempty"` } func NewTLoadTxnCommitRequest() *TLoadTxnCommitRequest { @@ -34583,6 +35379,33 @@ func (p *TLoadTxnCommitRequest) GetAuthCodeUuid() (v string) { } return *p.AuthCodeUuid } + +var TLoadTxnCommitRequest_GroupCommit_DEFAULT bool + +func (p *TLoadTxnCommitRequest) GetGroupCommit() (v bool) { + if !p.IsSetGroupCommit() { + return TLoadTxnCommitRequest_GroupCommit_DEFAULT + } + return *p.GroupCommit +} + +var TLoadTxnCommitRequest_ReceiveBytes_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetReceiveBytes() (v int64) { + if !p.IsSetReceiveBytes() { + return TLoadTxnCommitRequest_ReceiveBytes_DEFAULT + } + return *p.ReceiveBytes +} + +var TLoadTxnCommitRequest_BackendId_DEFAULT int64 + +func (p *TLoadTxnCommitRequest) GetBackendId() (v int64) { + if !p.IsSetBackendId() { + return TLoadTxnCommitRequest_BackendId_DEFAULT + } + return *p.BackendId +} func (p *TLoadTxnCommitRequest) SetCluster(val *string) { p.Cluster = val } @@ -34634,6 +35457,15 @@ func (p *TLoadTxnCommitRequest) SetTableId(val *int64) { func (p *TLoadTxnCommitRequest) SetAuthCodeUuid(val *string) { p.AuthCodeUuid = val } +func (p *TLoadTxnCommitRequest) SetGroupCommit(val *bool) { + p.GroupCommit = val +} +func (p *TLoadTxnCommitRequest) SetReceiveBytes(val *int64) { + p.ReceiveBytes = val +} +func (p *TLoadTxnCommitRequest) SetBackendId(val *int64) { + p.BackendId = val +} var fieldIDToName_TLoadTxnCommitRequest = map[int16]string{ 1: "cluster", @@ -34653,6 +35485,9 @@ var fieldIDToName_TLoadTxnCommitRequest = map[int16]string{ 15: "tbls", 16: "table_id", 17: "auth_code_uuid", + 18: "groupCommit", + 19: "receiveBytes", + 20: "backendId", } func (p *TLoadTxnCommitRequest) IsSetCluster() bool { @@ -34699,6 +35534,18 @@ func (p *TLoadTxnCommitRequest) IsSetAuthCodeUuid() bool { return p.AuthCodeUuid != nil } +func (p *TLoadTxnCommitRequest) IsSetGroupCommit() bool { + return p.GroupCommit != nil +} + +func (p *TLoadTxnCommitRequest) IsSetReceiveBytes() bool { + return p.ReceiveBytes != nil +} + +func (p 
*TLoadTxnCommitRequest) IsSetBackendId() bool { + return p.BackendId != nil +} + func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -34866,6 +35713,30 @@ func (p *TLoadTxnCommitRequest) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 18: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField18(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 19: + if fieldTypeId == thrift.I64 { + if err = p.ReadField19(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 20: + if fieldTypeId == thrift.I64 { + if err = p.ReadField20(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -35134,6 +36005,39 @@ func (p *TLoadTxnCommitRequest) ReadField17(iprot thrift.TProtocol) error { p.AuthCodeUuid = _field return nil } +func (p *TLoadTxnCommitRequest) ReadField18(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.GroupCommit = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField19(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ReceiveBytes = _field + return nil +} +func (p *TLoadTxnCommitRequest) ReadField20(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.BackendId = _field + return nil +} func (p *TLoadTxnCommitRequest) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -35209,6 +36113,18 @@ func (p *TLoadTxnCommitRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 17 goto WriteFieldError } + if err = p.writeField18(oprot); err != nil { + fieldId = 18 + goto WriteFieldError + } + if err = p.writeField19(oprot); err != nil { + fieldId = 19 + goto WriteFieldError + } + if err = p.writeField20(oprot); err != nil { + fieldId = 20 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -35554,6 +36470,63 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err) } +func (p *TLoadTxnCommitRequest) writeField18(oprot thrift.TProtocol) (err error) { + if p.IsSetGroupCommit() { + if err = oprot.WriteFieldBegin("groupCommit", thrift.BOOL, 18); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.GroupCommit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField19(oprot thrift.TProtocol) (err error) { + if p.IsSetReceiveBytes() { + if err = oprot.WriteFieldBegin("receiveBytes", thrift.I64, 19); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ReceiveBytes); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil 
+WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err) +} + +func (p *TLoadTxnCommitRequest) writeField20(oprot thrift.TProtocol) (err error) { + if p.IsSetBackendId() { + if err = oprot.WriteFieldBegin("backendId", thrift.I64, 20); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.BackendId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err) +} + func (p *TLoadTxnCommitRequest) String() string { if p == nil { return "" @@ -35619,6 +36592,15 @@ func (p *TLoadTxnCommitRequest) DeepEqual(ano *TLoadTxnCommitRequest) bool { if !p.Field17DeepEqual(ano.AuthCodeUuid) { return false } + if !p.Field18DeepEqual(ano.GroupCommit) { + return false + } + if !p.Field19DeepEqual(ano.ReceiveBytes) { + return false + } + if !p.Field20DeepEqual(ano.BackendId) { + return false + } return true } @@ -35793,6 +36775,42 @@ func (p *TLoadTxnCommitRequest) Field17DeepEqual(src *string) bool { } return true } +func (p *TLoadTxnCommitRequest) Field18DeepEqual(src *bool) bool { + + if p.GroupCommit == src { + return true + } else if p.GroupCommit == nil || src == nil { + return false + } + if *p.GroupCommit != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field19DeepEqual(src *int64) bool { + + if p.ReceiveBytes == src { + return true + } else if p.ReceiveBytes == nil || src == nil { + return false + } + if *p.ReceiveBytes != *src { + return false + } + return true +} +func (p *TLoadTxnCommitRequest) Field20DeepEqual(src *int64) bool { + + if p.BackendId == src { + return true + } else if p.BackendId == nil || src == nil { + return false + } + if *p.BackendId != *src { + return false + } + return true +} type TLoadTxnCommitResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` @@ -54892,18 +55910,19 @@ func (p *TTableRef) Field3DeepEqual(src *string) bool { } type TRestoreSnapshotRequest struct { - Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` - User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` - Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` - Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` - Token *string `thrift:"token,6,optional" frugal:"6,optional,string" json:"token,omitempty"` - LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` - RepoName *string `thrift:"repo_name,8,optional" frugal:"8,optional,string" json:"repo_name,omitempty"` - TableRefs []*TTableRef `thrift:"table_refs,9,optional" frugal:"9,optional,list" json:"table_refs,omitempty"` - Properties map[string]string `thrift:"properties,10,optional" frugal:"10,optional,map" json:"properties,omitempty"` - Meta []byte `thrift:"meta,11,optional" frugal:"11,optional,binary" json:"meta,omitempty"` - JobInfo []byte `thrift:"job_info,12,optional" frugal:"12,optional,binary" 
json:"job_info,omitempty"` + Cluster *string `thrift:"cluster,1,optional" frugal:"1,optional,string" json:"cluster,omitempty"` + User *string `thrift:"user,2,optional" frugal:"2,optional,string" json:"user,omitempty"` + Passwd *string `thrift:"passwd,3,optional" frugal:"3,optional,string" json:"passwd,omitempty"` + Db *string `thrift:"db,4,optional" frugal:"4,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,5,optional" frugal:"5,optional,string" json:"table,omitempty"` + Token *string `thrift:"token,6,optional" frugal:"6,optional,string" json:"token,omitempty"` + LabelName *string `thrift:"label_name,7,optional" frugal:"7,optional,string" json:"label_name,omitempty"` + RepoName *string `thrift:"repo_name,8,optional" frugal:"8,optional,string" json:"repo_name,omitempty"` + TableRefs []*TTableRef `thrift:"table_refs,9,optional" frugal:"9,optional,list" json:"table_refs,omitempty"` + Properties map[string]string `thrift:"properties,10,optional" frugal:"10,optional,map" json:"properties,omitempty"` + Meta []byte `thrift:"meta,11,optional" frugal:"11,optional,binary" json:"meta,omitempty"` + JobInfo []byte `thrift:"job_info,12,optional" frugal:"12,optional,binary" json:"job_info,omitempty"` + CleanRestore *bool `thrift:"clean_restore,13,optional" frugal:"13,optional,bool" json:"clean_restore,omitempty"` } func NewTRestoreSnapshotRequest() *TRestoreSnapshotRequest { @@ -55020,6 +56039,15 @@ func (p *TRestoreSnapshotRequest) GetJobInfo() (v []byte) { } return p.JobInfo } + +var TRestoreSnapshotRequest_CleanRestore_DEFAULT bool + +func (p *TRestoreSnapshotRequest) GetCleanRestore() (v bool) { + if !p.IsSetCleanRestore() { + return TRestoreSnapshotRequest_CleanRestore_DEFAULT + } + return *p.CleanRestore +} func (p *TRestoreSnapshotRequest) SetCluster(val *string) { p.Cluster = val } @@ -55056,6 +56084,9 @@ func (p *TRestoreSnapshotRequest) SetMeta(val []byte) { func (p *TRestoreSnapshotRequest) SetJobInfo(val []byte) { p.JobInfo = val } +func (p *TRestoreSnapshotRequest) SetCleanRestore(val *bool) { + p.CleanRestore = val +} var fieldIDToName_TRestoreSnapshotRequest = map[int16]string{ 1: "cluster", @@ -55070,6 +56101,7 @@ var fieldIDToName_TRestoreSnapshotRequest = map[int16]string{ 10: "properties", 11: "meta", 12: "job_info", + 13: "clean_restore", } func (p *TRestoreSnapshotRequest) IsSetCluster() bool { @@ -55120,6 +56152,10 @@ func (p *TRestoreSnapshotRequest) IsSetJobInfo() bool { return p.JobInfo != nil } +func (p *TRestoreSnapshotRequest) IsSetCleanRestore() bool { + return p.CleanRestore != nil +} + func (p *TRestoreSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -55235,6 +56271,14 @@ func (p *TRestoreSnapshotRequest) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 13: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -55426,6 +56470,17 @@ func (p *TRestoreSnapshotRequest) ReadField12(iprot thrift.TProtocol) error { p.JobInfo = _field return nil } +func (p *TRestoreSnapshotRequest) ReadField13(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.CleanRestore = _field + return nil +} func (p *TRestoreSnapshotRequest) Write(oprot thrift.TProtocol) 
(err error) { var fieldId int16 @@ -55481,6 +56536,10 @@ func (p *TRestoreSnapshotRequest) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -55746,6 +56805,25 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TRestoreSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetCleanRestore() { + if err = oprot.WriteFieldBegin("clean_restore", thrift.BOOL, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.CleanRestore); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + func (p *TRestoreSnapshotRequest) String() string { if p == nil { return "" @@ -55796,6 +56874,9 @@ func (p *TRestoreSnapshotRequest) DeepEqual(ano *TRestoreSnapshotRequest) bool { if !p.Field12DeepEqual(ano.JobInfo) { return false } + if !p.Field13DeepEqual(ano.CleanRestore) { + return false + } return true } @@ -55935,6 +57016,18 @@ func (p *TRestoreSnapshotRequest) Field12DeepEqual(src []byte) bool { } return true } +func (p *TRestoreSnapshotRequest) Field13DeepEqual(src *bool) bool { + + if p.CleanRestore == src { + return true + } else if p.CleanRestore == nil || src == nil { + return false + } + if *p.CleanRestore != *src { + return false + } + return true +} type TRestoreSnapshotResult_ struct { Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"` diff --git a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go index 95408b52..f9dc2041 100644 --- a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go @@ -2622,7 +2622,7 @@ func (p *TShowVariableResult_) FastRead(buf []byte) (int, error) { } switch fieldId { case 1: - if fieldTypeId == thrift.MAP { + if fieldTypeId == thrift.LIST { l, err = p.FastReadField1(buf[offset:]) offset += l if err != nil { @@ -2680,36 +2680,41 @@ RequiredFieldNotSetError: func (p *TShowVariableResult_) FastReadField1(buf []byte) (int, error) { offset := 0 - _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) offset += l if err != nil { return offset, err } - p.Variables = make(map[string]string, size) + p.Variables = make([][]string, 0, size) for i := 0; i < size; i++ { - var _key string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { return offset, err - } else { - offset += l + } + _elem := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem1 string + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l - _key = v + _elem1 = v - } + } - var _val string - if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + _elem = append(_elem, _elem1) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { 
offset += l - - _val = v - } - p.Variables[_key] = _val + p.Variables = append(p.Variables, _elem) } - if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { return offset, err } else { offset += l @@ -2746,36 +2751,42 @@ func (p *TShowVariableResult_) BLength() int { func (p *TShowVariableResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 - offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variables", thrift.MAP, 1) - mapBeginOffset := offset - offset += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, 0) + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variables", thrift.LIST, 1) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.LIST, 0) var length int - for k, v := range p.Variables { + for _, v := range p.Variables { length++ + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRING, 0) + var length int + for _, v := range v { + length++ + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, k) - - offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, v) - + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) } - bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.STRING, thrift.STRING, length) - offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.LIST, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) return offset } func (p *TShowVariableResult_) field1Length() int { l := 0 - l += bthrift.Binary.FieldBeginLength("variables", thrift.MAP, 1) - l += bthrift.Binary.MapBeginLength(thrift.STRING, thrift.STRING, len(p.Variables)) - for k, v := range p.Variables { - - l += bthrift.Binary.StringLengthNocopy(k) - - l += bthrift.Binary.StringLengthNocopy(v) + l += bthrift.Binary.FieldBeginLength("variables", thrift.LIST, 1) + l += bthrift.Binary.ListBeginLength(thrift.LIST, len(p.Variables)) + for _, v := range p.Variables { + l += bthrift.Binary.ListBeginLength(thrift.STRING, len(v)) + for _, v := range v { + l += bthrift.Binary.StringLengthNocopy(v) + } + l += bthrift.Binary.ListEndLength() } - l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.ListEndLength() l += bthrift.Binary.FieldEndLength() return l } @@ -14165,6 +14176,445 @@ func (p *TTxnLoadInfo) field6Length() int { return l } +func (p *TGroupCommitInfo) FastRead(buf []byte) (int, error) { + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + _, l, err = bthrift.Binary.ReadStructBegin(buf) + offset += l + if err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGroupCommitInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TGroupCommitInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GetGroupCommitLoadBeId = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommitLoadTableId = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Cluster = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField4(buf []byte) (int, error) { + offset := 0 + + if v, l, err := 
bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.IsCloud = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField5(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.UpdateLoadData = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField6(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableId = &v + + } + return offset, nil +} + +func (p *TGroupCommitInfo) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReceiveData = &v + + } + return offset, nil +} + +// for compatibility +func (p *TGroupCommitInfo) FastWrite(buf []byte) int { + return 0 +} + +func (p *TGroupCommitInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGroupCommitInfo") + if p != nil { + offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + } + offset += bthrift.Binary.WriteFieldStop(buf[offset:]) + offset += bthrift.Binary.WriteStructEnd(buf[offset:]) + return offset +} + +func (p *TGroupCommitInfo) BLength() int { + l := 0 + l += bthrift.Binary.StructBeginLength("TGroupCommitInfo") + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + } + l += bthrift.Binary.FieldStopLength() + l += bthrift.Binary.StructEndLength() + return l +} + +func (p *TGroupCommitInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGetGroupCommitLoadBeId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "getGroupCommitLoadBeId", thrift.BOOL, 1) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.GetGroupCommitLoadBeId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitLoadTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommitLoadTableId", thrift.I64, 2) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitLoadTableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCluster() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Cluster) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsCloud() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isCloud", 
thrift.BOOL, 4) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsCloud) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetUpdateLoadData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "updateLoadData", thrift.BOOL, 5) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.UpdateLoadData) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tableId", thrift.I64, 6) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.TableId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReceiveData() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receiveData", thrift.I64, 7) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceiveData) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TGroupCommitInfo) field1Length() int { + l := 0 + if p.IsSetGetGroupCommitLoadBeId() { + l += bthrift.Binary.FieldBeginLength("getGroupCommitLoadBeId", thrift.BOOL, 1) + l += bthrift.Binary.BoolLength(*p.GetGroupCommitLoadBeId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field2Length() int { + l := 0 + if p.IsSetGroupCommitLoadTableId() { + l += bthrift.Binary.FieldBeginLength("groupCommitLoadTableId", thrift.I64, 2) + l += bthrift.Binary.I64Length(*p.GroupCommitLoadTableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field3Length() int { + l := 0 + if p.IsSetCluster() { + l += bthrift.Binary.FieldBeginLength("cluster", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.Cluster) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field4Length() int { + l := 0 + if p.IsSetIsCloud() { + l += bthrift.Binary.FieldBeginLength("isCloud", thrift.BOOL, 4) + l += bthrift.Binary.BoolLength(*p.IsCloud) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field5Length() int { + l := 0 + if p.IsSetUpdateLoadData() { + l += bthrift.Binary.FieldBeginLength("updateLoadData", thrift.BOOL, 5) + l += bthrift.Binary.BoolLength(*p.UpdateLoadData) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field6Length() int { + l := 0 + if p.IsSetTableId() { + l += bthrift.Binary.FieldBeginLength("tableId", thrift.I64, 6) + l += bthrift.Binary.I64Length(*p.TableId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TGroupCommitInfo) field7Length() int { + l := 0 + if p.IsSetReceiveData() { + l += bthrift.Binary.FieldBeginLength("receiveData", thrift.I64, 7) + l += bthrift.Binary.I64Length(*p.ReceiveData) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -14599,6 +15049,20 @@ func (p *TMasterOpRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 30: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField30(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, 
err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 1000: if fieldTypeId == thrift.STRING { l, err = p.FastReadField1000(buf[offset:]) @@ -15135,6 +15599,19 @@ func (p *TMasterOpRequest) FastReadField29(buf []byte) (int, error) { return offset, nil } +func (p *TMasterOpRequest) FastReadField30(buf []byte) (int, error) { + offset := 0 + + tmp := NewTGroupCommitInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.GroupCommitInfo = tmp + return offset, nil +} + func (p *TMasterOpRequest) FastReadField1000(buf []byte) (int, error) { offset := 0 @@ -15200,6 +15677,7 @@ func (p *TMasterOpRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina offset += p.fastWriteField26(buf[offset:], binaryWriter) offset += p.fastWriteField28(buf[offset:], binaryWriter) offset += p.fastWriteField29(buf[offset:], binaryWriter) + offset += p.fastWriteField30(buf[offset:], binaryWriter) offset += p.fastWriteField1000(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) @@ -15240,6 +15718,7 @@ func (p *TMasterOpRequest) BLength() int { l += p.field27Length() l += p.field28Length() l += p.field29Length() + l += p.field30Length() l += p.field1000Length() l += p.field1001Length() } @@ -15588,6 +16067,16 @@ func (p *TMasterOpRequest) fastWriteField29(buf []byte, binaryWriter bthrift.Bin return offset } +func (p *TMasterOpRequest) fastWriteField30(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommitInfo", thrift.STRUCT, 30) + offset += p.GroupCommitInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMasterOpRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetCloudCluster() { @@ -15938,6 +16427,16 @@ func (p *TMasterOpRequest) field29Length() int { return l } +func (p *TMasterOpRequest) field30Length() int { + l := 0 + if p.IsSetGroupCommitInfo() { + l += bthrift.Binary.FieldBeginLength("groupCommitInfo", thrift.STRUCT, 30) + l += p.GroupCommitInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMasterOpRequest) field1000Length() int { l := 0 if p.IsSetCloudCluster() { @@ -16812,6 +17311,20 @@ func (p *TMasterOpResult_) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 10: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -16994,6 +17507,19 @@ func (p *TMasterOpResult_) FastReadField9(buf []byte) (int, error) { return offset, nil } +func (p *TMasterOpResult_) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommitLoadBeId = &v + + } + return offset, nil +} + // for compatibility func (p *TMasterOpResult_) FastWrite(buf []byte) int { return 0 @@ -17005,6 +17531,7 @@ func (p *TMasterOpResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += 
p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) @@ -17031,6 +17558,7 @@ func (p *TMasterOpResult_) BLength() int { l += p.field7Length() l += p.field8Length() l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -17137,6 +17665,17 @@ func (p *TMasterOpResult_) fastWriteField9(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TMasterOpResult_) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommitLoadBeId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommitLoadBeId", thrift.I64, 10) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.GroupCommitLoadBeId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMasterOpResult_) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("maxJournalId", thrift.I64, 1) @@ -17233,6 +17772,17 @@ func (p *TMasterOpResult_) field9Length() int { return l } +func (p *TMasterOpResult_) field10Length() int { + l := 0 + if p.IsSetGroupCommitLoadBeId() { + l += bthrift.Binary.FieldBeginLength("groupCommitLoadBeId", thrift.I64, 10) + l += bthrift.Binary.I64Length(*p.GroupCommitLoadBeId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TUpdateExportTaskStatusRequest) FastRead(buf []byte) (int, error) { var err error var offset int @@ -25413,6 +25963,48 @@ func (p *TLoadTxnCommitRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 18: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField18(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 19: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField19(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 20: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField20(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -25737,6 +26329,45 @@ func (p *TLoadTxnCommitRequest) FastReadField17(buf []byte) (int, error) { return offset, nil } +func (p *TLoadTxnCommitRequest) FastReadField18(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.GroupCommit = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField19(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.ReceiveBytes = &v + + } + return offset, nil +} + +func (p *TLoadTxnCommitRequest) FastReadField20(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.BackendId = &v + + } + return offset, nil 
+} + // for compatibility func (p *TLoadTxnCommitRequest) FastWrite(buf []byte) int { return 0 @@ -25752,6 +26383,9 @@ func (p *TLoadTxnCommitRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift offset += p.fastWriteField12(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) offset += p.fastWriteField16(buf[offset:], binaryWriter) + offset += p.fastWriteField18(buf[offset:], binaryWriter) + offset += p.fastWriteField19(buf[offset:], binaryWriter) + offset += p.fastWriteField20(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -25790,6 +26424,9 @@ func (p *TLoadTxnCommitRequest) BLength() int { l += p.field15Length() l += p.field16Length() l += p.field17Length() + l += p.field18Length() + l += p.field19Length() + l += p.field20Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -25985,6 +26622,39 @@ func (p *TLoadTxnCommitRequest) fastWriteField17(buf []byte, binaryWriter bthrif return offset } +func (p *TLoadTxnCommitRequest) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetGroupCommit() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "groupCommit", thrift.BOOL, 18) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.GroupCommit) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetReceiveBytes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "receiveBytes", thrift.I64, 19) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReceiveBytes) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TLoadTxnCommitRequest) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetBackendId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "backendId", thrift.I64, 20) + offset += bthrift.Binary.WriteI64(buf[offset:], *p.BackendId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TLoadTxnCommitRequest) field1Length() int { l := 0 if p.IsSetCluster() { @@ -26166,6 +26836,39 @@ func (p *TLoadTxnCommitRequest) field17Length() int { return l } +func (p *TLoadTxnCommitRequest) field18Length() int { + l := 0 + if p.IsSetGroupCommit() { + l += bthrift.Binary.FieldBeginLength("groupCommit", thrift.BOOL, 18) + l += bthrift.Binary.BoolLength(*p.GroupCommit) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field19Length() int { + l := 0 + if p.IsSetReceiveBytes() { + l += bthrift.Binary.FieldBeginLength("receiveBytes", thrift.I64, 19) + l += bthrift.Binary.I64Length(*p.ReceiveBytes) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TLoadTxnCommitRequest) field20Length() int { + l := 0 + if p.IsSetBackendId() { + l += bthrift.Binary.FieldBeginLength("backendId", thrift.I64, 20) + l += bthrift.Binary.I64Length(*p.BackendId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TLoadTxnCommitResult_) FastRead(buf []byte) (int, error) { var err error var offset int @@ -40529,6 +41232,20 @@ func (p *TRestoreSnapshotRequest) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.BOOL { + l, err = 
p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -40763,6 +41480,19 @@ func (p *TRestoreSnapshotRequest) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TRestoreSnapshotRequest) FastReadField13(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CleanRestore = &v + + } + return offset, nil +} + // for compatibility func (p *TRestoreSnapshotRequest) FastWrite(buf []byte) int { return 0 @@ -40772,6 +41502,7 @@ func (p *TRestoreSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthri offset := 0 offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TRestoreSnapshotRequest") if p != nil { + offset += p.fastWriteField13(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) offset += p.fastWriteField2(buf[offset:], binaryWriter) offset += p.fastWriteField3(buf[offset:], binaryWriter) @@ -40806,6 +41537,7 @@ func (p *TRestoreSnapshotRequest) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -40962,6 +41694,17 @@ func (p *TRestoreSnapshotRequest) fastWriteField12(buf []byte, binaryWriter bthr return offset } +func (p *TRestoreSnapshotRequest) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCleanRestore() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_restore", thrift.BOOL, 13) + offset += bthrift.Binary.WriteBool(buf[offset:], *p.CleanRestore) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TRestoreSnapshotRequest) field1Length() int { l := 0 if p.IsSetCluster() { @@ -41104,6 +41847,17 @@ func (p *TRestoreSnapshotRequest) field12Length() int { return l } +func (p *TRestoreSnapshotRequest) field13Length() int { + l := 0 + if p.IsSetCleanRestore() { + l += bthrift.Binary.FieldBeginLength("clean_restore", thrift.BOOL, 13) + l += bthrift.Binary.BoolLength(*p.CleanRestore) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TRestoreSnapshotResult_) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go index c8e41926..b7f32e16 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go @@ -1740,6 +1740,13 @@ type TQueryOptions struct { EnableNoNeedReadDataOpt bool `thrift:"enable_no_need_read_data_opt,116,optional" frugal:"116,optional,bool" json:"enable_no_need_read_data_opt,omitempty"` ReadCsvEmptyLineAsNull bool `thrift:"read_csv_empty_line_as_null,117,optional" frugal:"117,optional,bool" json:"read_csv_empty_line_as_null,omitempty"` SerdeDialect TSerdeDialect `thrift:"serde_dialect,118,optional" frugal:"118,optional,TSerdeDialect" json:"serde_dialect,omitempty"` + EnableMatchWithoutInvertedIndex bool `thrift:"enable_match_without_inverted_index,119,optional" frugal:"119,optional,bool" json:"enable_match_without_inverted_index,omitempty"` + EnableFallbackOnMissingInvertedIndex bool 
`thrift:"enable_fallback_on_missing_inverted_index,120,optional" frugal:"120,optional,bool" json:"enable_fallback_on_missing_inverted_index,omitempty"` + KeepCarriageReturn bool `thrift:"keep_carriage_return,121,optional" frugal:"121,optional,bool" json:"keep_carriage_return,omitempty"` + RuntimeBloomFilterMinSize int32 `thrift:"runtime_bloom_filter_min_size,122,optional" frugal:"122,optional,i32" json:"runtime_bloom_filter_min_size,omitempty"` + HiveParquetUseColumnNames bool `thrift:"hive_parquet_use_column_names,123,optional" frugal:"123,optional,bool" json:"hive_parquet_use_column_names,omitempty"` + HiveOrcUseColumnNames bool `thrift:"hive_orc_use_column_names,124,optional" frugal:"124,optional,bool" json:"hive_orc_use_column_names,omitempty"` + EnableSegmentCache bool `thrift:"enable_segment_cache,125,optional" frugal:"125,optional,bool" json:"enable_segment_cache,omitempty"` DisableFileCache bool `thrift:"disable_file_cache,1000,optional" frugal:"1000,optional,bool" json:"disable_file_cache,omitempty"` } @@ -1844,6 +1851,13 @@ func NewTQueryOptions() *TQueryOptions { EnableNoNeedReadDataOpt: true, ReadCsvEmptyLineAsNull: false, SerdeDialect: TSerdeDialect_DORIS, + EnableMatchWithoutInvertedIndex: true, + EnableFallbackOnMissingInvertedIndex: true, + KeepCarriageReturn: false, + RuntimeBloomFilterMinSize: 1048576, + HiveParquetUseColumnNames: true, + HiveOrcUseColumnNames: true, + EnableSegmentCache: true, DisableFileCache: false, } } @@ -1947,6 +1961,13 @@ func (p *TQueryOptions) InitDefault() { p.EnableNoNeedReadDataOpt = true p.ReadCsvEmptyLineAsNull = false p.SerdeDialect = TSerdeDialect_DORIS + p.EnableMatchWithoutInvertedIndex = true + p.EnableFallbackOnMissingInvertedIndex = true + p.KeepCarriageReturn = false + p.RuntimeBloomFilterMinSize = 1048576 + p.HiveParquetUseColumnNames = true + p.HiveOrcUseColumnNames = true + p.EnableSegmentCache = true p.DisableFileCache = false } @@ -2931,6 +2952,69 @@ func (p *TQueryOptions) GetSerdeDialect() (v TSerdeDialect) { return p.SerdeDialect } +var TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableMatchWithoutInvertedIndex() (v bool) { + if !p.IsSetEnableMatchWithoutInvertedIndex() { + return TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT + } + return p.EnableMatchWithoutInvertedIndex +} + +var TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableFallbackOnMissingInvertedIndex() (v bool) { + if !p.IsSetEnableFallbackOnMissingInvertedIndex() { + return TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT + } + return p.EnableFallbackOnMissingInvertedIndex +} + +var TQueryOptions_KeepCarriageReturn_DEFAULT bool = false + +func (p *TQueryOptions) GetKeepCarriageReturn() (v bool) { + if !p.IsSetKeepCarriageReturn() { + return TQueryOptions_KeepCarriageReturn_DEFAULT + } + return p.KeepCarriageReturn +} + +var TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT int32 = 1048576 + +func (p *TQueryOptions) GetRuntimeBloomFilterMinSize() (v int32) { + if !p.IsSetRuntimeBloomFilterMinSize() { + return TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT + } + return p.RuntimeBloomFilterMinSize +} + +var TQueryOptions_HiveParquetUseColumnNames_DEFAULT bool = true + +func (p *TQueryOptions) GetHiveParquetUseColumnNames() (v bool) { + if !p.IsSetHiveParquetUseColumnNames() { + return TQueryOptions_HiveParquetUseColumnNames_DEFAULT + } + return p.HiveParquetUseColumnNames +} + +var TQueryOptions_HiveOrcUseColumnNames_DEFAULT bool = true + 
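A note on the generated accessors above, offered as an illustrative sketch rather than part of this patch: fields declared optional with a default (for example runtime_bloom_filter_min_size, whose default is 1048576) are stored by value, IsSetX treats the default value itself as "unset", and GetX falls back to the *_DEFAULT constant. A minimal usage sketch, assuming the module import path github.com/selectdb/ccr_syncer (not shown in this patch) and an arbitrary example value of 4096:

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
    )

    func main() {
        opts := palointernalservice.NewTQueryOptions()

        // Freshly constructed options hold the generated defaults, so the
        // field counts as "unset" and the getter returns the default.
        fmt.Println(opts.IsSetRuntimeBloomFilterMinSize()) // false
        fmt.Println(opts.GetRuntimeBloomFilterMinSize())   // 1048576

        // Any non-default value flips IsSet to true, which also makes
        // writeField122 / fastWriteField122 emit the field during serialization.
        opts.SetRuntimeBloomFilterMinSize(4096)
        fmt.Println(opts.IsSetRuntimeBloomFilterMinSize()) // true
        fmt.Println(opts.GetRuntimeBloomFilterMinSize())   // 4096
    }

Because IsSet compares against the default, setting the field back to 1048576 effectively unsets it again and it is skipped on the wire.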
+func (p *TQueryOptions) GetHiveOrcUseColumnNames() (v bool) { + if !p.IsSetHiveOrcUseColumnNames() { + return TQueryOptions_HiveOrcUseColumnNames_DEFAULT + } + return p.HiveOrcUseColumnNames +} + +var TQueryOptions_EnableSegmentCache_DEFAULT bool = true + +func (p *TQueryOptions) GetEnableSegmentCache() (v bool) { + if !p.IsSetEnableSegmentCache() { + return TQueryOptions_EnableSegmentCache_DEFAULT + } + return p.EnableSegmentCache +} + var TQueryOptions_DisableFileCache_DEFAULT bool = false func (p *TQueryOptions) GetDisableFileCache() (v bool) { @@ -3266,6 +3350,27 @@ func (p *TQueryOptions) SetReadCsvEmptyLineAsNull(val bool) { func (p *TQueryOptions) SetSerdeDialect(val TSerdeDialect) { p.SerdeDialect = val } +func (p *TQueryOptions) SetEnableMatchWithoutInvertedIndex(val bool) { + p.EnableMatchWithoutInvertedIndex = val +} +func (p *TQueryOptions) SetEnableFallbackOnMissingInvertedIndex(val bool) { + p.EnableFallbackOnMissingInvertedIndex = val +} +func (p *TQueryOptions) SetKeepCarriageReturn(val bool) { + p.KeepCarriageReturn = val +} +func (p *TQueryOptions) SetRuntimeBloomFilterMinSize(val int32) { + p.RuntimeBloomFilterMinSize = val +} +func (p *TQueryOptions) SetHiveParquetUseColumnNames(val bool) { + p.HiveParquetUseColumnNames = val +} +func (p *TQueryOptions) SetHiveOrcUseColumnNames(val bool) { + p.HiveOrcUseColumnNames = val +} +func (p *TQueryOptions) SetEnableSegmentCache(val bool) { + p.EnableSegmentCache = val +} func (p *TQueryOptions) SetDisableFileCache(val bool) { p.DisableFileCache = val } @@ -3380,6 +3485,13 @@ var fieldIDToName_TQueryOptions = map[int16]string{ 116: "enable_no_need_read_data_opt", 117: "read_csv_empty_line_as_null", 118: "serde_dialect", + 119: "enable_match_without_inverted_index", + 120: "enable_fallback_on_missing_inverted_index", + 121: "keep_carriage_return", + 122: "runtime_bloom_filter_min_size", + 123: "hive_parquet_use_column_names", + 124: "hive_orc_use_column_names", + 125: "enable_segment_cache", 1000: "disable_file_cache", } @@ -3819,6 +3931,34 @@ func (p *TQueryOptions) IsSetSerdeDialect() bool { return p.SerdeDialect != TQueryOptions_SerdeDialect_DEFAULT } +func (p *TQueryOptions) IsSetEnableMatchWithoutInvertedIndex() bool { + return p.EnableMatchWithoutInvertedIndex != TQueryOptions_EnableMatchWithoutInvertedIndex_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableFallbackOnMissingInvertedIndex() bool { + return p.EnableFallbackOnMissingInvertedIndex != TQueryOptions_EnableFallbackOnMissingInvertedIndex_DEFAULT +} + +func (p *TQueryOptions) IsSetKeepCarriageReturn() bool { + return p.KeepCarriageReturn != TQueryOptions_KeepCarriageReturn_DEFAULT +} + +func (p *TQueryOptions) IsSetRuntimeBloomFilterMinSize() bool { + return p.RuntimeBloomFilterMinSize != TQueryOptions_RuntimeBloomFilterMinSize_DEFAULT +} + +func (p *TQueryOptions) IsSetHiveParquetUseColumnNames() bool { + return p.HiveParquetUseColumnNames != TQueryOptions_HiveParquetUseColumnNames_DEFAULT +} + +func (p *TQueryOptions) IsSetHiveOrcUseColumnNames() bool { + return p.HiveOrcUseColumnNames != TQueryOptions_HiveOrcUseColumnNames_DEFAULT +} + +func (p *TQueryOptions) IsSetEnableSegmentCache() bool { + return p.EnableSegmentCache != TQueryOptions_EnableSegmentCache_DEFAULT +} + func (p *TQueryOptions) IsSetDisableFileCache() bool { return p.DisableFileCache != TQueryOptions_DisableFileCache_DEFAULT } @@ -4714,6 +4854,62 @@ func (p *TQueryOptions) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 
119: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField119(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 120: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField120(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 121: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField121(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 122: + if fieldTypeId == thrift.I32 { + if err = p.ReadField122(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 123: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField123(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 124: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField124(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 125: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField125(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 1000: if fieldTypeId == thrift.BOOL { if err = p.ReadField1000(iprot); err != nil { @@ -5947,6 +6143,83 @@ func (p *TQueryOptions) ReadField118(iprot thrift.TProtocol) error { p.SerdeDialect = _field return nil } +func (p *TQueryOptions) ReadField119(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableMatchWithoutInvertedIndex = _field + return nil +} +func (p *TQueryOptions) ReadField120(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableFallbackOnMissingInvertedIndex = _field + return nil +} +func (p *TQueryOptions) ReadField121(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.KeepCarriageReturn = _field + return nil +} +func (p *TQueryOptions) ReadField122(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.RuntimeBloomFilterMinSize = _field + return nil +} +func (p *TQueryOptions) ReadField123(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.HiveParquetUseColumnNames = _field + return nil +} +func (p *TQueryOptions) ReadField124(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.HiveOrcUseColumnNames = _field + return nil +} +func (p *TQueryOptions) ReadField125(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.EnableSegmentCache = _field + return nil +} func (p *TQueryOptions) ReadField1000(iprot thrift.TProtocol) error { var _field bool @@ -6401,6 +6674,34 @@ func (p *TQueryOptions) Write(oprot thrift.TProtocol) (err error) { fieldId = 118 goto WriteFieldError } + if err = p.writeField119(oprot); err != nil { + fieldId = 119 + goto WriteFieldError + } + if err 
= p.writeField120(oprot); err != nil { + fieldId = 120 + goto WriteFieldError + } + if err = p.writeField121(oprot); err != nil { + fieldId = 121 + goto WriteFieldError + } + if err = p.writeField122(oprot); err != nil { + fieldId = 122 + goto WriteFieldError + } + if err = p.writeField123(oprot); err != nil { + fieldId = 123 + goto WriteFieldError + } + if err = p.writeField124(oprot); err != nil { + fieldId = 124 + goto WriteFieldError + } + if err = p.writeField125(oprot); err != nil { + fieldId = 125 + goto WriteFieldError + } if err = p.writeField1000(oprot); err != nil { fieldId = 1000 goto WriteFieldError @@ -8494,6 +8795,139 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 118 end error: ", p), err) } +func (p *TQueryOptions) writeField119(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableMatchWithoutInvertedIndex() { + if err = oprot.WriteFieldBegin("enable_match_without_inverted_index", thrift.BOOL, 119); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableMatchWithoutInvertedIndex); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 119 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 119 end error: ", p), err) +} + +func (p *TQueryOptions) writeField120(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + if err = oprot.WriteFieldBegin("enable_fallback_on_missing_inverted_index", thrift.BOOL, 120); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableFallbackOnMissingInvertedIndex); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 120 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 120 end error: ", p), err) +} + +func (p *TQueryOptions) writeField121(oprot thrift.TProtocol) (err error) { + if p.IsSetKeepCarriageReturn() { + if err = oprot.WriteFieldBegin("keep_carriage_return", thrift.BOOL, 121); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.KeepCarriageReturn); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 121 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 121 end error: ", p), err) +} + +func (p *TQueryOptions) writeField122(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeBloomFilterMinSize() { + if err = oprot.WriteFieldBegin("runtime_bloom_filter_min_size", thrift.I32, 122); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.RuntimeBloomFilterMinSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 122 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 122 end error: ", p), err) +} + +func (p *TQueryOptions) writeField123(oprot thrift.TProtocol) (err error) { + if p.IsSetHiveParquetUseColumnNames() { + if err = 
oprot.WriteFieldBegin("hive_parquet_use_column_names", thrift.BOOL, 123); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.HiveParquetUseColumnNames); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 123 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 123 end error: ", p), err) +} + +func (p *TQueryOptions) writeField124(oprot thrift.TProtocol) (err error) { + if p.IsSetHiveOrcUseColumnNames() { + if err = oprot.WriteFieldBegin("hive_orc_use_column_names", thrift.BOOL, 124); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.HiveOrcUseColumnNames); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 124 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 124 end error: ", p), err) +} + +func (p *TQueryOptions) writeField125(oprot thrift.TProtocol) (err error) { + if p.IsSetEnableSegmentCache() { + if err = oprot.WriteFieldBegin("enable_segment_cache", thrift.BOOL, 125); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.EnableSegmentCache); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 125 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 125 end error: ", p), err) +} + func (p *TQueryOptions) writeField1000(oprot thrift.TProtocol) (err error) { if p.IsSetDisableFileCache() { if err = oprot.WriteFieldBegin("disable_file_cache", thrift.BOOL, 1000); err != nil { @@ -8854,6 +9288,27 @@ func (p *TQueryOptions) DeepEqual(ano *TQueryOptions) bool { if !p.Field118DeepEqual(ano.SerdeDialect) { return false } + if !p.Field119DeepEqual(ano.EnableMatchWithoutInvertedIndex) { + return false + } + if !p.Field120DeepEqual(ano.EnableFallbackOnMissingInvertedIndex) { + return false + } + if !p.Field121DeepEqual(ano.KeepCarriageReturn) { + return false + } + if !p.Field122DeepEqual(ano.RuntimeBloomFilterMinSize) { + return false + } + if !p.Field123DeepEqual(ano.HiveParquetUseColumnNames) { + return false + } + if !p.Field124DeepEqual(ano.HiveOrcUseColumnNames) { + return false + } + if !p.Field125DeepEqual(ano.EnableSegmentCache) { + return false + } if !p.Field1000DeepEqual(ano.DisableFileCache) { return false } @@ -9673,6 +10128,55 @@ func (p *TQueryOptions) Field118DeepEqual(src TSerdeDialect) bool { } return true } +func (p *TQueryOptions) Field119DeepEqual(src bool) bool { + + if p.EnableMatchWithoutInvertedIndex != src { + return false + } + return true +} +func (p *TQueryOptions) Field120DeepEqual(src bool) bool { + + if p.EnableFallbackOnMissingInvertedIndex != src { + return false + } + return true +} +func (p *TQueryOptions) Field121DeepEqual(src bool) bool { + + if p.KeepCarriageReturn != src { + return false + } + return true +} +func (p *TQueryOptions) Field122DeepEqual(src int32) bool { + + if p.RuntimeBloomFilterMinSize != src { + return false + } + return true +} +func (p *TQueryOptions) Field123DeepEqual(src bool) bool { + + if p.HiveParquetUseColumnNames != src { + return false + } + return true +} 
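The companion DeepEqual helpers added here compare these scalar options field by field, so two option structs that both carry the generated defaults compare equal, while any single differing field breaks equality. A short standalone sketch under the same import-path assumption as the earlier example:

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
    )

    func main() {
        a := palointernalservice.NewTQueryOptions()
        b := palointernalservice.NewTQueryOptions()
        // Two freshly constructed option structs carry identical defaults,
        // so every FieldNDeepEqual check passes.
        fmt.Println(a.DeepEqual(b)) // true

        // Flipping one of the new booleans (enable_segment_cache defaults to
        // true) makes Field125DeepEqual, and hence DeepEqual, return false.
        b.SetEnableSegmentCache(false)
        fmt.Println(a.DeepEqual(b)) // false
    }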
+func (p *TQueryOptions) Field124DeepEqual(src bool) bool { + + if p.HiveOrcUseColumnNames != src { + return false + } + return true +} +func (p *TQueryOptions) Field125DeepEqual(src bool) bool { + + if p.EnableSegmentCache != src { + return false + } + return true +} func (p *TQueryOptions) Field1000DeepEqual(src bool) bool { if p.DisableFileCache != src { diff --git a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go index 282ead7c..78f01f3a 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go @@ -2663,6 +2663,104 @@ func (p *TQueryOptions) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 119: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField119(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 120: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField120(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 121: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField121(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 122: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField122(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 123: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField123(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 124: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField124(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 125: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField125(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 1000: if fieldTypeId == thrift.BOOL { l, err = p.FastReadField1000(buf[offset:]) @@ -4227,6 +4325,104 @@ func (p *TQueryOptions) FastReadField118(buf []byte) (int, error) { return offset, nil } +func (p *TQueryOptions) FastReadField119(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableMatchWithoutInvertedIndex = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField120(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableFallbackOnMissingInvertedIndex = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField121(buf []byte) (int, error) { 
+ offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.KeepCarriageReturn = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField122(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.RuntimeBloomFilterMinSize = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField123(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.HiveParquetUseColumnNames = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField124(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.HiveOrcUseColumnNames = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField125(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.EnableSegmentCache = v + + } + return offset, nil +} + func (p *TQueryOptions) FastReadField1000(buf []byte) (int, error) { offset := 0 @@ -4354,6 +4550,13 @@ func (p *TQueryOptions) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField115(buf[offset:], binaryWriter) offset += p.fastWriteField116(buf[offset:], binaryWriter) offset += p.fastWriteField117(buf[offset:], binaryWriter) + offset += p.fastWriteField119(buf[offset:], binaryWriter) + offset += p.fastWriteField120(buf[offset:], binaryWriter) + offset += p.fastWriteField121(buf[offset:], binaryWriter) + offset += p.fastWriteField122(buf[offset:], binaryWriter) + offset += p.fastWriteField123(buf[offset:], binaryWriter) + offset += p.fastWriteField124(buf[offset:], binaryWriter) + offset += p.fastWriteField125(buf[offset:], binaryWriter) offset += p.fastWriteField1000(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) offset += p.fastWriteField42(buf[offset:], binaryWriter) @@ -4479,6 +4682,13 @@ func (p *TQueryOptions) BLength() int { l += p.field116Length() l += p.field117Length() l += p.field118Length() + l += p.field119Length() + l += p.field120Length() + l += p.field121Length() + l += p.field122Length() + l += p.field123Length() + l += p.field124Length() + l += p.field125Length() l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() @@ -5684,6 +5894,83 @@ func (p *TQueryOptions) fastWriteField118(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TQueryOptions) fastWriteField119(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableMatchWithoutInvertedIndex() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_match_without_inverted_index", thrift.BOOL, 119) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableMatchWithoutInvertedIndex) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField120(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_fallback_on_missing_inverted_index", thrift.BOOL, 120) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableFallbackOnMissingInvertedIndex) + + offset += 
bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField121(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetKeepCarriageReturn() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "keep_carriage_return", thrift.BOOL, 121) + offset += bthrift.Binary.WriteBool(buf[offset:], p.KeepCarriageReturn) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField122(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRuntimeBloomFilterMinSize() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_bloom_filter_min_size", thrift.I32, 122) + offset += bthrift.Binary.WriteI32(buf[offset:], p.RuntimeBloomFilterMinSize) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField123(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHiveParquetUseColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_parquet_use_column_names", thrift.BOOL, 123) + offset += bthrift.Binary.WriteBool(buf[offset:], p.HiveParquetUseColumnNames) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField124(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetHiveOrcUseColumnNames() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hive_orc_use_column_names", thrift.BOOL, 124) + offset += bthrift.Binary.WriteBool(buf[offset:], p.HiveOrcUseColumnNames) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField125(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEnableSegmentCache() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_segment_cache", thrift.BOOL, 125) + offset += bthrift.Binary.WriteBool(buf[offset:], p.EnableSegmentCache) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TQueryOptions) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetDisableFileCache() { @@ -6893,6 +7180,83 @@ func (p *TQueryOptions) field118Length() int { return l } +func (p *TQueryOptions) field119Length() int { + l := 0 + if p.IsSetEnableMatchWithoutInvertedIndex() { + l += bthrift.Binary.FieldBeginLength("enable_match_without_inverted_index", thrift.BOOL, 119) + l += bthrift.Binary.BoolLength(p.EnableMatchWithoutInvertedIndex) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field120Length() int { + l := 0 + if p.IsSetEnableFallbackOnMissingInvertedIndex() { + l += bthrift.Binary.FieldBeginLength("enable_fallback_on_missing_inverted_index", thrift.BOOL, 120) + l += bthrift.Binary.BoolLength(p.EnableFallbackOnMissingInvertedIndex) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field121Length() int { + l := 0 + if p.IsSetKeepCarriageReturn() { + l += bthrift.Binary.FieldBeginLength("keep_carriage_return", thrift.BOOL, 121) + l += bthrift.Binary.BoolLength(p.KeepCarriageReturn) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field122Length() int { + l := 0 + if p.IsSetRuntimeBloomFilterMinSize() { + l += bthrift.Binary.FieldBeginLength("runtime_bloom_filter_min_size", thrift.I32, 122) + l += bthrift.Binary.I32Length(p.RuntimeBloomFilterMinSize) + 
+ l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field123Length() int { + l := 0 + if p.IsSetHiveParquetUseColumnNames() { + l += bthrift.Binary.FieldBeginLength("hive_parquet_use_column_names", thrift.BOOL, 123) + l += bthrift.Binary.BoolLength(p.HiveParquetUseColumnNames) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field124Length() int { + l := 0 + if p.IsSetHiveOrcUseColumnNames() { + l += bthrift.Binary.FieldBeginLength("hive_orc_use_column_names", thrift.BOOL, 124) + l += bthrift.Binary.BoolLength(p.HiveOrcUseColumnNames) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field125Length() int { + l := 0 + if p.IsSetEnableSegmentCache() { + l += bthrift.Binary.FieldBeginLength("enable_segment_cache", thrift.BOOL, 125) + l += bthrift.Binary.BoolLength(p.EnableSegmentCache) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TQueryOptions) field1000Length() int { l := 0 if p.IsSetDisableFileCache() { diff --git a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go index fa36d09e..ea0ff85f 100644 --- a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go @@ -26873,19 +26873,20 @@ func (p *TCsvScanNode) Field10DeepEqual(src map[string]*TMiniLoadEtlFunction) bo } type TSchemaScanNode struct { - TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` - TableName string `thrift:"table_name,2,required" frugal:"2,required,string" json:"table_name"` - Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` - Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` - Wild *string `thrift:"wild,5,optional" frugal:"5,optional,string" json:"wild,omitempty"` - User *string `thrift:"user,6,optional" frugal:"6,optional,string" json:"user,omitempty"` - Ip *string `thrift:"ip,7,optional" frugal:"7,optional,string" json:"ip,omitempty"` - Port *int32 `thrift:"port,8,optional" frugal:"8,optional,i32" json:"port,omitempty"` - ThreadId *int64 `thrift:"thread_id,9,optional" frugal:"9,optional,i64" json:"thread_id,omitempty"` - UserIp *string `thrift:"user_ip,10,optional" frugal:"10,optional,string" json:"user_ip,omitempty"` - CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,11,optional" frugal:"11,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` - ShowHiddenCloumns bool `thrift:"show_hidden_cloumns,12,optional" frugal:"12,optional,bool" json:"show_hidden_cloumns,omitempty"` - Catalog *string `thrift:"catalog,14,optional" frugal:"14,optional,string" json:"catalog,omitempty"` + TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` + TableName string `thrift:"table_name,2,required" frugal:"2,required,string" json:"table_name"` + Db *string `thrift:"db,3,optional" frugal:"3,optional,string" json:"db,omitempty"` + Table *string `thrift:"table,4,optional" frugal:"4,optional,string" json:"table,omitempty"` + Wild *string `thrift:"wild,5,optional" frugal:"5,optional,string" json:"wild,omitempty"` + User *string `thrift:"user,6,optional" frugal:"6,optional,string" json:"user,omitempty"` + Ip *string `thrift:"ip,7,optional" frugal:"7,optional,string" json:"ip,omitempty"` + Port *int32 `thrift:"port,8,optional" frugal:"8,optional,i32" json:"port,omitempty"` + ThreadId *int64 `thrift:"thread_id,9,optional" frugal:"9,optional,i64" json:"thread_id,omitempty"` 
+ UserIp *string `thrift:"user_ip,10,optional" frugal:"10,optional,string" json:"user_ip,omitempty"` + CurrentUserIdent *types.TUserIdentity `thrift:"current_user_ident,11,optional" frugal:"11,optional,types.TUserIdentity" json:"current_user_ident,omitempty"` + ShowHiddenCloumns bool `thrift:"show_hidden_cloumns,12,optional" frugal:"12,optional,bool" json:"show_hidden_cloumns,omitempty"` + Catalog *string `thrift:"catalog,14,optional" frugal:"14,optional,string" json:"catalog,omitempty"` + FeAddrList []*types.TNetworkAddress `thrift:"fe_addr_list,15,optional" frugal:"15,optional,list" json:"fe_addr_list,omitempty"` } func NewTSchemaScanNode() *TSchemaScanNode { @@ -27005,6 +27006,15 @@ func (p *TSchemaScanNode) GetCatalog() (v string) { } return *p.Catalog } + +var TSchemaScanNode_FeAddrList_DEFAULT []*types.TNetworkAddress + +func (p *TSchemaScanNode) GetFeAddrList() (v []*types.TNetworkAddress) { + if !p.IsSetFeAddrList() { + return TSchemaScanNode_FeAddrList_DEFAULT + } + return p.FeAddrList +} func (p *TSchemaScanNode) SetTupleId(val types.TTupleId) { p.TupleId = val } @@ -27044,6 +27054,9 @@ func (p *TSchemaScanNode) SetShowHiddenCloumns(val bool) { func (p *TSchemaScanNode) SetCatalog(val *string) { p.Catalog = val } +func (p *TSchemaScanNode) SetFeAddrList(val []*types.TNetworkAddress) { + p.FeAddrList = val +} var fieldIDToName_TSchemaScanNode = map[int16]string{ 1: "tuple_id", @@ -27059,6 +27072,7 @@ var fieldIDToName_TSchemaScanNode = map[int16]string{ 11: "current_user_ident", 12: "show_hidden_cloumns", 14: "catalog", + 15: "fe_addr_list", } func (p *TSchemaScanNode) IsSetDb() bool { @@ -27105,6 +27119,10 @@ func (p *TSchemaScanNode) IsSetCatalog() bool { return p.Catalog != nil } +func (p *TSchemaScanNode) IsSetFeAddrList() bool { + return p.FeAddrList != nil +} + func (p *TSchemaScanNode) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -27232,6 +27250,14 @@ func (p *TSchemaScanNode) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 15: + if fieldTypeId == thrift.LIST { + if err = p.ReadField15(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -27412,6 +27438,29 @@ func (p *TSchemaScanNode) ReadField14(iprot thrift.TProtocol) error { p.Catalog = _field return nil } +func (p *TSchemaScanNode) ReadField15(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*types.TNetworkAddress, 0, size) + values := make([]types.TNetworkAddress, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FeAddrList = _field + return nil +} func (p *TSchemaScanNode) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -27471,6 +27520,10 @@ func (p *TSchemaScanNode) Write(oprot thrift.TProtocol) (err error) { fieldId = 14 goto WriteFieldError } + if err = p.writeField15(oprot); err != nil { + fieldId = 15 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -27732,6 +27785,33 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err) } +func (p *TSchemaScanNode) 
writeField15(oprot thrift.TProtocol) (err error) { + if p.IsSetFeAddrList() { + if err = oprot.WriteFieldBegin("fe_addr_list", thrift.LIST, 15); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FeAddrList)); err != nil { + return err + } + for _, v := range p.FeAddrList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err) +} + func (p *TSchemaScanNode) String() string { if p == nil { return "" @@ -27785,6 +27865,9 @@ func (p *TSchemaScanNode) DeepEqual(ano *TSchemaScanNode) bool { if !p.Field14DeepEqual(ano.Catalog) { return false } + if !p.Field15DeepEqual(ano.FeAddrList) { + return false + } return true } @@ -27924,6 +28007,19 @@ func (p *TSchemaScanNode) Field14DeepEqual(src *string) bool { } return true } +func (p *TSchemaScanNode) Field15DeepEqual(src []*types.TNetworkAddress) bool { + + if len(p.FeAddrList) != len(src) { + return false + } + for i, v := range p.FeAddrList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} type TMetaScanNode struct { TupleId types.TTupleId `thrift:"tuple_id,1,required" frugal:"1,required,i32" json:"tuple_id"` diff --git a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go index f0ec181a..e8ce3923 100644 --- a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go @@ -19462,6 +19462,20 @@ func (p *TSchemaScanNode) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 15: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField15(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -19680,6 +19694,33 @@ func (p *TSchemaScanNode) FastReadField14(buf []byte) (int, error) { return offset, nil } +func (p *TSchemaScanNode) FastReadField15(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FeAddrList = make([]*types.TNetworkAddress, 0, size) + for i := 0; i < size; i++ { + _elem := types.NewTNetworkAddress() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FeAddrList = append(p.FeAddrList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TSchemaScanNode) FastWrite(buf []byte) int { return 0 @@ -19702,6 +19743,7 @@ func (p *TSchemaScanNode) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField10(buf[offset:], binaryWriter) offset += p.fastWriteField11(buf[offset:], binaryWriter) offset += p.fastWriteField14(buf[offset:], binaryWriter) + offset += p.fastWriteField15(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -19725,6 +19767,7 @@ func (p 
*TSchemaScanNode) BLength() int { l += p.field11Length() l += p.field12Length() l += p.field14Length() + l += p.field15Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -19869,6 +19912,24 @@ func (p *TSchemaScanNode) fastWriteField14(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TSchemaScanNode) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFeAddrList() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fe_addr_list", thrift.LIST, 15) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.FeAddrList { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TSchemaScanNode) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("tuple_id", thrift.I32, 1) @@ -20007,6 +20068,20 @@ func (p *TSchemaScanNode) field14Length() int { return l } +func (p *TSchemaScanNode) field15Length() int { + l := 0 + if p.IsSetFeAddrList() { + l += bthrift.Binary.FieldBeginLength("fe_addr_list", thrift.LIST, 15) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.FeAddrList)) + for _, v := range p.FeAddrList { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMetaScanNode) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/thrift/AgentService.thrift b/pkg/rpc/thrift/AgentService.thrift index 8d24e64c..767ede90 100644 --- a/pkg/rpc/thrift/AgentService.thrift +++ b/pkg/rpc/thrift/AgentService.thrift @@ -47,7 +47,7 @@ struct TTabletSchema { 19: optional list cluster_key_idxes // col unique id for row store column 20: optional list row_store_col_cids - 21: optional i64 row_store_page_size = 16384; + 21: optional i64 row_store_page_size = 16384 } // this enum stands for different storage format in src_backends diff --git a/pkg/rpc/thrift/BackendService.thrift b/pkg/rpc/thrift/BackendService.thrift index 26cf411f..1e52d94f 100644 --- a/pkg/rpc/thrift/BackendService.thrift +++ b/pkg/rpc/thrift/BackendService.thrift @@ -266,6 +266,9 @@ struct TWorkloadGroupInfo { 11: optional i32 min_remote_scan_thread_num 12: optional i32 spill_threshold_low_watermark 13: optional i32 spill_threshold_high_watermark + 14: optional i64 read_bytes_per_second + 15: optional i64 remote_read_bytes_per_second + 16: optional string tag } enum TWorkloadMetricType { diff --git a/pkg/rpc/thrift/Descriptors.thrift b/pkg/rpc/thrift/Descriptors.thrift index cb844c93..20042adc 100644 --- a/pkg/rpc/thrift/Descriptors.thrift +++ b/pkg/rpc/thrift/Descriptors.thrift @@ -133,7 +133,8 @@ enum TSchemaTableType { SCH_USER, SCH_PROCS_PRIV, SCH_WORKLOAD_POLICY, - SCH_TABLE_OPTIONS; + SCH_TABLE_OPTIONS, + SCH_WORKLOAD_GROUP_PRIVILEGES; } enum THdfsCompression { diff --git a/pkg/rpc/thrift/FrontendService.thrift b/pkg/rpc/thrift/FrontendService.thrift index ecade162..569a2211 100644 --- a/pkg/rpc/thrift/FrontendService.thrift +++ b/pkg/rpc/thrift/FrontendService.thrift @@ -105,7 +105,7 @@ struct TShowVariableRequest { // Results of a call to describeTable() struct TShowVariableResult { - 1: required map variables + 1: required list> variables } // Valid table file formats @@ -540,6 +540,16 @@ struct TTxnLoadInfo 
{ 6: optional list subTxnInfos } +struct TGroupCommitInfo{ + 1: optional bool getGroupCommitLoadBeId + 2: optional i64 groupCommitLoadTableId + 3: optional string cluster + 4: optional bool isCloud + 5: optional bool updateLoadData + 6: optional i64 tableId + 7: optional i64 receiveData +} + struct TMasterOpRequest { 1: required string user 2: required string db @@ -573,6 +583,7 @@ struct TMasterOpRequest { 28: optional map user_variables // transaction load 29: optional TTxnLoadInfo txnLoadInfo + 30: optional TGroupCommitInfo groupCommitInfo // selectdb cloud 1000: optional string cloud_cluster @@ -606,6 +617,7 @@ struct TMasterOpResult { 8: optional list queryResultBufList; // transaction load 9: optional TTxnLoadInfo txnLoadInfo; + 10: optional i64 groupCommitLoadBeId; } struct TUpdateExportTaskStatusRequest { @@ -817,6 +829,9 @@ struct TLoadTxnCommitRequest { 15: optional list tbls 16: optional i64 table_id 17: optional string auth_code_uuid + 18: optional bool groupCommit + 19: optional i64 receiveBytes + 20: optional i64 backendId } struct TLoadTxnCommitResult { @@ -987,6 +1002,7 @@ enum TSchemaTableName { ROUTINES_INFO = 4, // db information_schema's table WORKLOAD_SCHEDULE_POLICY = 5, TABLE_OPTIONS = 6, + WORKLOAD_GROUP_PRIVILEGES = 7, } struct TMetadataTableRequestParams { @@ -1228,6 +1244,7 @@ struct TRestoreSnapshotRequest { 10: optional map properties 11: optional binary meta 12: optional binary job_info + 13: optional bool clean_restore } struct TRestoreSnapshotResult { diff --git a/pkg/rpc/thrift/PaloInternalService.thrift b/pkg/rpc/thrift/PaloInternalService.thrift index 0e0a87ea..a75e06f3 100644 --- a/pkg/rpc/thrift/PaloInternalService.thrift +++ b/pkg/rpc/thrift/PaloInternalService.thrift @@ -193,7 +193,7 @@ struct TQueryOptions { // non-pipelinex engine removed. always true. 57: optional bool enable_pipeline_engine = true - 58: optional i32 repeat_max_num = 0 + 58: optional i32 repeat_max_num = 0 // Deprecated 59: optional i64 external_sort_bytes_threshold = 0 @@ -308,7 +308,7 @@ struct TQueryOptions { 113: optional bool enable_local_merge_sort = false; 114: optional bool enable_parallel_result_sink = false; - + 115: optional bool enable_short_circuit_query_access_column_store = false; 116: optional bool enable_no_need_read_data_opt = true; @@ -316,7 +316,25 @@ struct TQueryOptions { 117: optional bool read_csv_empty_line_as_null = false; 118: optional TSerdeDialect serde_dialect = TSerdeDialect.DORIS; + + 119: optional bool enable_match_without_inverted_index = true; + + 120: optional bool enable_fallback_on_missing_inverted_index = true; + + 121: optional bool keep_carriage_return = false; // \n,\r\n split line in CSV. + + 122: optional i32 runtime_bloom_filter_min_size = 1048576; + + //Access Parquet/ORC columns by name by default. Set this property to `false` to access columns + //by their ordinal position in the Hive table definition. + 123: optional bool hive_parquet_use_column_names = true; + 124: optional bool hive_orc_use_column_names = true; + + 125: optional bool enable_segment_cache = true; + // For cloud, to control if the content would be written into file cache + // In write path, to control if the content would be written into file cache. + // In read path, read from file cache or remote storage when execute query. 
1000: optional bool disable_file_cache = false } diff --git a/pkg/rpc/thrift/PlanNodes.thrift b/pkg/rpc/thrift/PlanNodes.thrift index cdc5e49d..26d7983a 100644 --- a/pkg/rpc/thrift/PlanNodes.thrift +++ b/pkg/rpc/thrift/PlanNodes.thrift @@ -698,6 +698,7 @@ struct TSchemaScanNode { 12: optional bool show_hidden_cloumns = false // 13: optional list table_structure // deprecated 14: optional string catalog + 15: optional list fe_addr_list } struct TMetaScanNode { diff --git a/regression-test/suites/db-sync-clean-restore/test_db_sync_clean_restore.groovy b/regression-test/suites/db-sync-clean-restore/test_db_sync_clean_restore.groovy new file mode 100644 index 00000000..ee952c04 --- /dev/null +++ b/regression-test/suites/db-sync-clean-restore/test_db_sync_clean_restore.groovy @@ -0,0 +1,261 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_db_sync_clean_restore") { + + def tableName = "tbl_db_sync_clean_restore_" + UUID.randomUUID().toString().replace("-", "") + def syncerAddress = "127.0.0.1:9190" + def test_num = 0 + def insert_num = 20 + def sync_gap_time = 5000 + def opPartitonName = "less" + String response + + def checkSelectTimesOf = { sqlString, rowSize, times -> Boolean + def tmpRes = target_sql "${sqlString}" + while (tmpRes.size() != rowSize) { + sleep(sync_gap_time) + if (--times > 0) { + tmpRes = target_sql "${sqlString}" + } else { + break + } + } + return tmpRes.size() == rowSize + } + + def checkShowTimesOf = { sqlString, myClosure, times, func = "sql" -> Boolean + Boolean ret = false + List> res + while (times > 0) { + try { + if (func == "sql") { + res = sql "${sqlString}" + } else { + res = target_sql "${sqlString}" + } + if (myClosure.call(res)) { + ret = true + } + } catch (Exception e) { } + + if (ret) { + break + } else if (--times > 0) { + sleep(sync_gap_time) + } + } + + return ret + } + + def checkRestoreFinishTimesOf = { checkTable, times -> Boolean + Boolean ret = false + while (times > 0) { + def sqlInfo = target_sql "SHOW RESTORE FROM TEST_${context.dbName}" + for (List row : sqlInfo) { + if ((row[10] as String).contains(checkTable)) { + ret = (row[4] as String) == "FINISHED" + } + } + + if (ret) { + break + } else if (--times > 0) { + sleep(sync_gap_time) + } + } + + return ret + } + + def exist = { res -> Boolean + return res.size() != 0 + } + def notExist = { res -> Boolean + return res.size() == 0 + } + + sql "ALTER DATABASE ${context.dbName} SET properties (\"binlog.enable\" = \"true\")" + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` 
VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_1 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_2 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${tableName}_3 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + target_sql """ + CREATE TABLE if NOT EXISTS ${tableName}_3 + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + PARTITION BY RANGE(`id`) + ( + PARTITION `${opPartitonName}_0` VALUES LESS THAN ("0"), + PARTITION `${opPartitonName}_1` VALUES LESS THAN ("10"), + PARTITION `${opPartitonName}_2` VALUES LESS THAN ("20") + ) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + List values = [] + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index})") + } + + sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_2 VALUES ${values.join(",")} """ + sql """ INSERT INTO ${tableName}_3 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_1 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_2 VALUES ${values.join(",")} """ + target_sql """ INSERT INTO ${tableName}_3 VALUES ${values.join(",")} """ + + def v = target_sql "SELECT * FROM ${tableName}_1" + assertEquals(v.size(), insert_num); + v = target_sql "SELECT * FROM ${tableName}_2" + assertEquals(v.size(), insert_num); + v = target_sql "SELECT * FROM ${tableName}_3" + assertEquals(v.size(), insert_num); + + sql "DROP TABLE ${tableName}_1 FORCE" + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_0 FORCE" + sql "ALTER TABLE ${tableName}_2 DROP PARTITION ${opPartitonName}_1 FORCE" + sql "sync" + + 
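+    // Create the CCR job and wait for the restore to finish, then verify the clean
+    // restore result on the target cluster: ${tableName}_1 (dropped with FORCE above)
+    // must be gone, the two dropped partitions of ${tableName}_2 must not come back,
+    // and ${tableName}_3 must still hold all of its rows.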
httpTest {
+        uri "/create_ccr"
+        endpoint syncerAddress
+        def bodyJson = get_ccr_body ""
+        body "${bodyJson}"
+        op "post"
+        result response
+    }
+
+    assertTrue(checkRestoreFinishTimesOf("${tableName}_3", 60))
+
+    v = target_sql "SELECT * FROM ${tableName}_3"
+    assertEquals(v.size(), insert_num);
+    v = target_sql "SELECT * FROM ${tableName}_2"
+    assertEquals(v.size(), insert_num - 10);
+    v = target_sql """ SHOW TABLES LIKE "${tableName}_1" """
+    assertEquals(v.size(), 0);
+}
+
+