From 6b52ce7c722da047e966bf14b0d58ae9fbba837b Mon Sep 17 00:00:00 2001 From: walter Date: Wed, 25 Sep 2024 14:08:34 +0800 Subject: [PATCH] Filter dropped indexes to avoid META NOT FOUND (#185) --- pkg/ccr/ingest_binlog_job.go | 8 + pkg/ccr/meta.go | 4 + pkg/ccr/metaer.go | 1 + pkg/ccr/thrift_meta.go | 12 + pkg/rpc/kitex_gen/datasinks/DataSinks.go | 96 ++ pkg/rpc/kitex_gen/datasinks/k-DataSinks.go | 75 + pkg/rpc/kitex_gen/descriptors/Descriptors.go | 166 +- .../kitex_gen/descriptors/k-Descriptors.go | 102 ++ .../frontendservice/FrontendService.go | 96 ++ .../frontendservice/k-FrontendService.go | 78 + .../heartbeatservice/HeartbeatService.go | 186 ++- .../heartbeatservice/k-HeartbeatService.go | 102 ++ .../PaloInternalService.go | 1397 ++++++++++++++++- .../k-PaloInternalService.go | 1035 +++++++++++- pkg/rpc/kitex_gen/plannodes/PlanNodes.go | 152 +- pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go | 102 ++ pkg/rpc/thrift/DataSinks.thrift | 1 + pkg/rpc/thrift/Descriptors.thrift | 10 +- pkg/rpc/thrift/FrontendService.thrift | 1 + pkg/rpc/thrift/HeartbeatService.thrift | 2 + pkg/rpc/thrift/PaloInternalService.thrift | 21 +- pkg/rpc/thrift/PlanNodes.thrift | 5 +- regression-test/common/helper.groovy | 29 +- .../test_filter_dropped_indexes.groovy | 120 ++ 24 files changed, 3626 insertions(+), 175 deletions(-) create mode 100644 regression-test/suites/table-schema-change/test_filter_dropped_indexes.groovy diff --git a/pkg/ccr/ingest_binlog_job.go b/pkg/ccr/ingest_binlog_job.go index 4f47776d..3206b710 100644 --- a/pkg/ccr/ingest_binlog_job.go +++ b/pkg/ccr/ingest_binlog_job.go @@ -378,6 +378,9 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit } for _, indexId := range indexIds { + if j.srcMeta.IsIndexDropped(indexId) { + continue + } srcIndexMeta, ok := srcIndexIdMap[indexId] if !ok { j.setError(xerror.Errorf(xerror.Meta, "index id %v not found in src meta", indexId)) @@ -400,6 +403,11 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, 
destTableId int64, partit destPartitionId: destPartitionId, } for _, indexId := range indexIds { + if j.srcMeta.IsIndexDropped(indexId) { + log.Infof("skip the dropped index %d", indexId) + continue + } + srcIndexMeta := srcIndexIdMap[indexId] destIndexMeta := destIndexNameMap[getSrcIndexName(job, srcIndexMeta)] prepareIndexArg.srcIndexMeta = srcIndexMeta diff --git a/pkg/ccr/meta.go b/pkg/ccr/meta.go index e6fbd9ef..47dea723 100644 --- a/pkg/ccr/meta.go +++ b/pkg/ccr/meta.go @@ -1177,3 +1177,7 @@ func (m *Meta) IsPartitionDropped(partitionId int64) bool { func (m *Meta) IsTableDropped(partitionId int64) bool { panic("IsTableDropped is not supported, please use ThriftMeta instead") } + +func (m *Meta) IsIndexDropped(indexId int64) bool { + panic("IsIndexDropped is not supported, please use ThriftMeta instead") +} diff --git a/pkg/ccr/metaer.go b/pkg/ccr/metaer.go index d40f9f23..5ca5c2f2 100644 --- a/pkg/ccr/metaer.go +++ b/pkg/ccr/metaer.go @@ -77,6 +77,7 @@ type IngestBinlogMetaer interface { GetBackendMap() (map[int64]*base.Backend, error) IsPartitionDropped(partitionId int64) bool IsTableDropped(tableId int64) bool + IsIndexDropped(indexId int64) bool } type Metaer interface { diff --git a/pkg/ccr/thrift_meta.go b/pkg/ccr/thrift_meta.go index e9be9927..e7a14c8a 100644 --- a/pkg/ccr/thrift_meta.go +++ b/pkg/ccr/thrift_meta.go @@ -137,11 +137,16 @@ func NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64 for _, table := range dbMeta.GetDroppedTables() { droppedTables[table] = struct{}{} } + droppedIndexes := make(map[int64]struct{}) + for _, index := range dbMeta.GetDroppedIndexes() { + droppedIndexes[index] = struct{}{} + } return &ThriftMeta{ meta: meta, droppedPartitions: droppedPartitions, droppedTables: droppedTables, + droppedIndexes: droppedIndexes, }, nil } @@ -149,6 +154,7 @@ type ThriftMeta struct { meta *Meta droppedPartitions map[int64]struct{} droppedTables map[int64]struct{} + droppedIndexes map[int64]struct{} } func (tm 
*ThriftMeta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) { @@ -246,3 +252,9 @@ func (tm *ThriftMeta) IsTableDropped(tableId int64) bool { _, ok := tm.droppedTables[tableId] return ok } + +// Whether the target index is dropped +func (tm *ThriftMeta) IsIndexDropped(tableId int64) bool { + _, ok := tm.droppedIndexes[tableId] + return ok +} diff --git a/pkg/rpc/kitex_gen/datasinks/DataSinks.go b/pkg/rpc/kitex_gen/datasinks/DataSinks.go index 882412d0..3b8244c7 100644 --- a/pkg/rpc/kitex_gen/datasinks/DataSinks.go +++ b/pkg/rpc/kitex_gen/datasinks/DataSinks.go @@ -3269,6 +3269,7 @@ type TDataStreamSink struct { TabletSinkLocation *descriptors.TOlapTableLocationParam `thrift:"tablet_sink_location,10,optional" frugal:"10,optional,descriptors.TOlapTableLocationParam" json:"tablet_sink_location,omitempty"` TabletSinkTxnId *int64 `thrift:"tablet_sink_txn_id,11,optional" frugal:"11,optional,i64" json:"tablet_sink_txn_id,omitempty"` TabletSinkTupleId *types.TTupleId `thrift:"tablet_sink_tuple_id,12,optional" frugal:"12,optional,i32" json:"tablet_sink_tuple_id,omitempty"` + TabletSinkExprs []*exprs.TExpr `thrift:"tablet_sink_exprs,13,optional" frugal:"13,optional,list" json:"tablet_sink_exprs,omitempty"` } func NewTDataStreamSink() *TDataStreamSink { @@ -3380,6 +3381,15 @@ func (p *TDataStreamSink) GetTabletSinkTupleId() (v types.TTupleId) { } return *p.TabletSinkTupleId } + +var TDataStreamSink_TabletSinkExprs_DEFAULT []*exprs.TExpr + +func (p *TDataStreamSink) GetTabletSinkExprs() (v []*exprs.TExpr) { + if !p.IsSetTabletSinkExprs() { + return TDataStreamSink_TabletSinkExprs_DEFAULT + } + return p.TabletSinkExprs +} func (p *TDataStreamSink) SetDestNodeId(val types.TPlanNodeId) { p.DestNodeId = val } @@ -3416,6 +3426,9 @@ func (p *TDataStreamSink) SetTabletSinkTxnId(val *int64) { func (p *TDataStreamSink) SetTabletSinkTupleId(val *types.TTupleId) { p.TabletSinkTupleId = val } +func (p *TDataStreamSink) SetTabletSinkExprs(val
[]*exprs.TExpr) { + p.TabletSinkExprs = val +} var fieldIDToName_TDataStreamSink = map[int16]string{ 1: "dest_node_id", @@ -3430,6 +3443,7 @@ var fieldIDToName_TDataStreamSink = map[int16]string{ 10: "tablet_sink_location", 11: "tablet_sink_txn_id", 12: "tablet_sink_tuple_id", + 13: "tablet_sink_exprs", } func (p *TDataStreamSink) IsSetOutputPartition() bool { @@ -3476,6 +3490,10 @@ func (p *TDataStreamSink) IsSetTabletSinkTupleId() bool { return p.TabletSinkTupleId != nil } +func (p *TDataStreamSink) IsSetTabletSinkExprs() bool { + return p.TabletSinkExprs != nil +} + func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -3595,6 +3613,14 @@ func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 13: + if fieldTypeId == thrift.LIST { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -3791,6 +3817,29 @@ func (p *TDataStreamSink) ReadField12(iprot thrift.TProtocol) error { p.TabletSinkTupleId = _field return nil } +func (p *TDataStreamSink) ReadField13(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*exprs.TExpr, 0, size) + values := make([]exprs.TExpr, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TabletSinkExprs = _field + return nil +} func (p *TDataStreamSink) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -3846,6 +3895,10 @@ func (p *TDataStreamSink) Write(oprot thrift.TProtocol) (err error) { fieldId = 12 goto WriteFieldError } + if err = 
p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -4112,6 +4165,33 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) } +func (p *TDataStreamSink) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetTabletSinkExprs() { + if err = oprot.WriteFieldBegin("tablet_sink_exprs", thrift.LIST, 13); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TabletSinkExprs)); err != nil { + return err + } + for _, v := range p.TabletSinkExprs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) +} + func (p *TDataStreamSink) String() string { if p == nil { return "" @@ -4162,6 +4242,9 @@ func (p *TDataStreamSink) DeepEqual(ano *TDataStreamSink) bool { if !p.Field12DeepEqual(ano.TabletSinkTupleId) { return false } + if !p.Field13DeepEqual(ano.TabletSinkExprs) { + return false + } return true } @@ -4287,6 +4370,19 @@ func (p *TDataStreamSink) Field12DeepEqual(src *types.TTupleId) bool { } return true } +func (p *TDataStreamSink) Field13DeepEqual(src []*exprs.TExpr) bool { + + if len(p.TabletSinkExprs) != len(src) { + return false + } + for i, v := range p.TabletSinkExprs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} type TMultiCastDataStreamSink struct { Sinks []*TDataStreamSink `thrift:"sinks,1,optional" frugal:"1,optional,list" json:"sinks,omitempty"` diff --git a/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go b/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go index 
10205a53..8f4a005f 100644 --- a/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go +++ b/pkg/rpc/kitex_gen/datasinks/k-DataSinks.go @@ -2144,6 +2144,20 @@ func (p *TDataStreamSink) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 13: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -2389,6 +2403,33 @@ func (p *TDataStreamSink) FastReadField12(buf []byte) (int, error) { return offset, nil } +func (p *TDataStreamSink) FastReadField13(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TabletSinkExprs = make([]*exprs.TExpr, 0, size) + for i := 0; i < size; i++ { + _elem := exprs.NewTExpr() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.TabletSinkExprs = append(p.TabletSinkExprs, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TDataStreamSink) FastWrite(buf []byte) int { return 0 @@ -2410,6 +2451,7 @@ func (p *TDataStreamSink) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar offset += p.fastWriteField8(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -2432,6 +2474,7 @@ func (p *TDataStreamSink) BLength() int { l += p.field10Length() l += p.field11Length() l += p.field12Length() + l += 
p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -2583,6 +2626,24 @@ func (p *TDataStreamSink) fastWriteField12(buf []byte, binaryWriter bthrift.Bina return offset } +func (p *TDataStreamSink) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTabletSinkExprs() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_sink_exprs", thrift.LIST, 13) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.TabletSinkExprs { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TDataStreamSink) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("dest_node_id", thrift.I32, 1) @@ -2716,6 +2777,20 @@ func (p *TDataStreamSink) field12Length() int { return l } +func (p *TDataStreamSink) field13Length() int { + l := 0 + if p.IsSetTabletSinkExprs() { + l += bthrift.Binary.FieldBeginLength("tablet_sink_exprs", thrift.LIST, 13) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.TabletSinkExprs)) + for _, v := range p.TabletSinkExprs { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TMultiCastDataStreamSink) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/descriptors/Descriptors.go b/pkg/rpc/kitex_gen/descriptors/Descriptors.go index 7d7d499b..d9935917 100644 --- a/pkg/rpc/kitex_gen/descriptors/Descriptors.go +++ b/pkg/rpc/kitex_gen/descriptors/Descriptors.go @@ -13822,6 +13822,8 @@ type TMCTable struct { PublicAccess *string `thrift:"public_access,6,optional" frugal:"6,optional,string" json:"public_access,omitempty"` 
OdpsUrl *string `thrift:"odps_url,7,optional" frugal:"7,optional,string" json:"odps_url,omitempty"` TunnelUrl *string `thrift:"tunnel_url,8,optional" frugal:"8,optional,string" json:"tunnel_url,omitempty"` + Endpoint *string `thrift:"endpoint,9,optional" frugal:"9,optional,string" json:"endpoint,omitempty"` + Quota *string `thrift:"quota,10,optional" frugal:"10,optional,string" json:"quota,omitempty"` } func NewTMCTable() *TMCTable { @@ -13902,6 +13904,24 @@ func (p *TMCTable) GetTunnelUrl() (v string) { } return *p.TunnelUrl } + +var TMCTable_Endpoint_DEFAULT string + +func (p *TMCTable) GetEndpoint() (v string) { + if !p.IsSetEndpoint() { + return TMCTable_Endpoint_DEFAULT + } + return *p.Endpoint +} + +var TMCTable_Quota_DEFAULT string + +func (p *TMCTable) GetQuota() (v string) { + if !p.IsSetQuota() { + return TMCTable_Quota_DEFAULT + } + return *p.Quota +} func (p *TMCTable) SetRegion(val *string) { p.Region = val } @@ -13926,16 +13946,24 @@ func (p *TMCTable) SetOdpsUrl(val *string) { func (p *TMCTable) SetTunnelUrl(val *string) { p.TunnelUrl = val } +func (p *TMCTable) SetEndpoint(val *string) { + p.Endpoint = val +} +func (p *TMCTable) SetQuota(val *string) { + p.Quota = val +} var fieldIDToName_TMCTable = map[int16]string{ - 1: "region", - 2: "project", - 3: "table", - 4: "access_key", - 5: "secret_key", - 6: "public_access", - 7: "odps_url", - 8: "tunnel_url", + 1: "region", + 2: "project", + 3: "table", + 4: "access_key", + 5: "secret_key", + 6: "public_access", + 7: "odps_url", + 8: "tunnel_url", + 9: "endpoint", + 10: "quota", } func (p *TMCTable) IsSetRegion() bool { @@ -13970,6 +13998,14 @@ func (p *TMCTable) IsSetTunnelUrl() bool { return p.TunnelUrl != nil } +func (p *TMCTable) IsSetEndpoint() bool { + return p.Endpoint != nil +} + +func (p *TMCTable) IsSetQuota() bool { + return p.Quota != nil +} + func (p *TMCTable) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -14053,6 +14089,22 @@ func (p *TMCTable) Read(iprot 
thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 9: + if fieldTypeId == thrift.STRING { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRING { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -14170,6 +14222,28 @@ func (p *TMCTable) ReadField8(iprot thrift.TProtocol) error { p.TunnelUrl = _field return nil } +func (p *TMCTable) ReadField9(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Endpoint = _field + return nil +} +func (p *TMCTable) ReadField10(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Quota = _field + return nil +} func (p *TMCTable) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -14209,6 +14283,14 @@ func (p *TMCTable) Write(oprot thrift.TProtocol) (err error) { fieldId = 8 goto WriteFieldError } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -14379,6 +14461,44 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } +func (p *TMCTable) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetEndpoint() { + if err = oprot.WriteFieldBegin("endpoint", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Endpoint); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *TMCTable) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetQuota() { + if err = oprot.WriteFieldBegin("quota", thrift.STRING, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Quota); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + func (p *TMCTable) String() string { if p == nil { return "" @@ -14417,6 +14537,12 @@ func (p *TMCTable) DeepEqual(ano *TMCTable) bool { if !p.Field8DeepEqual(ano.TunnelUrl) { return false } + if !p.Field9DeepEqual(ano.Endpoint) { + return false + } + if !p.Field10DeepEqual(ano.Quota) { + return false + } return true } @@ -14516,6 +14642,30 @@ func (p *TMCTable) Field8DeepEqual(src *string) bool { } return true } +func (p *TMCTable) Field9DeepEqual(src *string) bool { + + if p.Endpoint == src { + return true + } else if p.Endpoint == nil || src == nil { + return false + } + if strings.Compare(*p.Endpoint, *src) != 0 { + return false + } + return true +} +func (p *TMCTable) Field10DeepEqual(src *string) bool { + + if p.Quota == src { + return true + } else if p.Quota == nil || src == nil { + return false + } + if strings.Compare(*p.Quota, *src) != 0 { + return false + } + return true +} type TTrinoConnectorTable struct { DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"` diff --git a/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go 
b/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go index ac847f11..40acec01 100644 --- a/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go +++ b/pkg/rpc/kitex_gen/descriptors/k-Descriptors.go @@ -10436,6 +10436,34 @@ func (p *TMCTable) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 9: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -10575,6 +10603,32 @@ func (p *TMCTable) FastReadField8(buf []byte) (int, error) { return offset, nil } +func (p *TMCTable) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Endpoint = &v + + } + return offset, nil +} + +func (p *TMCTable) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.Quota = &v + + } + return offset, nil +} + // for compatibility func (p *TMCTable) FastWrite(buf []byte) int { return 0 @@ -10592,6 +10646,8 @@ func (p *TMCTable) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter offset += p.fastWriteField6(buf[offset:], binaryWriter) offset += p.fastWriteField7(buf[offset:], binaryWriter) offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) } offset += 
bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -10610,6 +10666,8 @@ func (p *TMCTable) BLength() int { l += p.field6Length() l += p.field7Length() l += p.field8Length() + l += p.field9Length() + l += p.field10Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -10704,6 +10762,28 @@ func (p *TMCTable) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter return offset } +func (p *TMCTable) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "endpoint", thrift.STRING, 9) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Endpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMCTable) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQuota() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "quota", thrift.STRING, 10) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Quota) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMCTable) field1Length() int { l := 0 if p.IsSetRegion() { @@ -10792,6 +10872,28 @@ func (p *TMCTable) field8Length() int { return l } +func (p *TMCTable) field9Length() int { + l := 0 + if p.IsSetEndpoint() { + l += bthrift.Binary.FieldBeginLength("endpoint", thrift.STRING, 9) + l += bthrift.Binary.StringLengthNocopy(*p.Endpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMCTable) field10Length() int { + l := 0 + if p.IsSetQuota() { + l += bthrift.Binary.FieldBeginLength("quota", thrift.STRING, 10) + l += bthrift.Binary.StringLengthNocopy(*p.Quota) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TTrinoConnectorTable) FastRead(buf []byte) (int, error) { var err error var offset int diff --git 
a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go index d2e4d05e..fd1e872c 100644 --- a/pkg/rpc/kitex_gen/frontendservice/FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/FrontendService.go @@ -69968,6 +69968,7 @@ type TGetMetaDBMeta struct { Tables []*TGetMetaTableMeta `thrift:"tables,3,optional" frugal:"3,optional,list" json:"tables,omitempty"` DroppedPartitions []int64 `thrift:"dropped_partitions,4,optional" frugal:"4,optional,list" json:"dropped_partitions,omitempty"` DroppedTables []int64 `thrift:"dropped_tables,5,optional" frugal:"5,optional,list" json:"dropped_tables,omitempty"` + DroppedIndexes []int64 `thrift:"dropped_indexes,6,optional" frugal:"6,optional,list" json:"dropped_indexes,omitempty"` } func NewTGetMetaDBMeta() *TGetMetaDBMeta { @@ -70021,6 +70022,15 @@ func (p *TGetMetaDBMeta) GetDroppedTables() (v []int64) { } return p.DroppedTables } + +var TGetMetaDBMeta_DroppedIndexes_DEFAULT []int64 + +func (p *TGetMetaDBMeta) GetDroppedIndexes() (v []int64) { + if !p.IsSetDroppedIndexes() { + return TGetMetaDBMeta_DroppedIndexes_DEFAULT + } + return p.DroppedIndexes +} func (p *TGetMetaDBMeta) SetId(val *int64) { p.Id = val } @@ -70036,6 +70046,9 @@ func (p *TGetMetaDBMeta) SetDroppedPartitions(val []int64) { func (p *TGetMetaDBMeta) SetDroppedTables(val []int64) { p.DroppedTables = val } +func (p *TGetMetaDBMeta) SetDroppedIndexes(val []int64) { + p.DroppedIndexes = val +} var fieldIDToName_TGetMetaDBMeta = map[int16]string{ 1: "id", @@ -70043,6 +70056,7 @@ var fieldIDToName_TGetMetaDBMeta = map[int16]string{ 3: "tables", 4: "dropped_partitions", 5: "dropped_tables", + 6: "dropped_indexes", } func (p *TGetMetaDBMeta) IsSetId() bool { @@ -70065,6 +70079,10 @@ func (p *TGetMetaDBMeta) IsSetDroppedTables() bool { return p.DroppedTables != nil } +func (p *TGetMetaDBMeta) IsSetDroppedIndexes() bool { + return p.DroppedIndexes != nil +} + func (p *TGetMetaDBMeta) Read(iprot thrift.TProtocol) 
(err error) { var fieldTypeId thrift.TType @@ -70124,6 +70142,14 @@ func (p *TGetMetaDBMeta) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 6: + if fieldTypeId == thrift.LIST { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -70244,6 +70270,29 @@ func (p *TGetMetaDBMeta) ReadField5(iprot thrift.TProtocol) error { p.DroppedTables = _field return nil } +func (p *TGetMetaDBMeta) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int64, 0, size) + for i := 0; i < size; i++ { + + var _elem int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DroppedIndexes = _field + return nil +} func (p *TGetMetaDBMeta) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -70271,6 +70320,10 @@ func (p *TGetMetaDBMeta) Write(oprot thrift.TProtocol) (err error) { fieldId = 5 goto WriteFieldError } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -70408,6 +70461,33 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) } +func (p *TGetMetaDBMeta) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetDroppedIndexes() { + if err = oprot.WriteFieldBegin("dropped_indexes", thrift.LIST, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I64, len(p.DroppedIndexes)); err != nil { + return err + } + for _, v := range p.DroppedIndexes { + if err := oprot.WriteI64(v); err != nil { + return err + } 
+ } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + func (p *TGetMetaDBMeta) String() string { if p == nil { return "" @@ -70437,6 +70517,9 @@ func (p *TGetMetaDBMeta) DeepEqual(ano *TGetMetaDBMeta) bool { if !p.Field5DeepEqual(ano.DroppedTables) { return false } + if !p.Field6DeepEqual(ano.DroppedIndexes) { + return false + } return true } @@ -70503,6 +70586,19 @@ func (p *TGetMetaDBMeta) Field5DeepEqual(src []int64) bool { } return true } +func (p *TGetMetaDBMeta) Field6DeepEqual(src []int64) bool { + + if len(p.DroppedIndexes) != len(src) { + return false + } + for i, v := range p.DroppedIndexes { + _src := src[i] + if v != _src { + return false + } + } + return true +} type TGetMetaResult_ struct { Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"` diff --git a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go index fc640213..61b54585 100644 --- a/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go +++ b/pkg/rpc/kitex_gen/frontendservice/k-FrontendService.go @@ -51395,6 +51395,20 @@ func (p *TGetMetaDBMeta) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 6: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -51543,6 +51557,36 @@ func (p *TGetMetaDBMeta) FastReadField5(buf []byte) (int, error) { return offset, nil } +func 
(p *TGetMetaDBMeta) FastReadField6(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.DroppedIndexes = make([]int64, 0, size) + for i := 0; i < size; i++ { + var _elem int64 + if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.DroppedIndexes = append(p.DroppedIndexes, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + // for compatibility func (p *TGetMetaDBMeta) FastWrite(buf []byte) int { return 0 @@ -51557,6 +51601,7 @@ func (p *TGetMetaDBMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binary offset += p.fastWriteField3(buf[offset:], binaryWriter) offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -51572,6 +51617,7 @@ func (p *TGetMetaDBMeta) BLength() int { l += p.field3Length() l += p.field4Length() l += p.field5Length() + l += p.field6Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -51656,6 +51702,25 @@ func (p *TGetMetaDBMeta) fastWriteField5(buf []byte, binaryWriter bthrift.Binary return offset } +func (p *TGetMetaDBMeta) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDroppedIndexes() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "dropped_indexes", thrift.LIST, 6) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I64, 0) + var length int + for _, v := range p.DroppedIndexes { + length++ + offset += bthrift.Binary.WriteI64(buf[offset:], v) + + } + 
bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TGetMetaDBMeta) field1Length() int { l := 0 if p.IsSetId() { @@ -51718,6 +51783,19 @@ func (p *TGetMetaDBMeta) field5Length() int { return l } +func (p *TGetMetaDBMeta) field6Length() int { + l := 0 + if p.IsSetDroppedIndexes() { + l += bthrift.Binary.FieldBeginLength("dropped_indexes", thrift.LIST, 6) + l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.DroppedIndexes)) + var tmpV int64 + l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.DroppedIndexes) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TGetMetaResult_) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go b/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go index 4ba4c0d4..a1aa7548 100644 --- a/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go +++ b/pkg/rpc/kitex_gen/heartbeatservice/HeartbeatService.go @@ -262,15 +262,17 @@ func (p *TFrontendInfo) Field2DeepEqual(src *int64) bool { } type TMasterInfo struct { - NetworkAddress *types.TNetworkAddress `thrift:"network_address,1,required" frugal:"1,required,types.TNetworkAddress" json:"network_address"` - ClusterId types.TClusterId `thrift:"cluster_id,2,required" frugal:"2,required,i32" json:"cluster_id"` - Epoch types.TEpoch `thrift:"epoch,3,required" frugal:"3,required,i64" json:"epoch"` - Token *string `thrift:"token,4,optional" frugal:"4,optional,string" json:"token,omitempty"` - BackendIp *string `thrift:"backend_ip,5,optional" frugal:"5,optional,string" json:"backend_ip,omitempty"` - HttpPort *types.TPort `thrift:"http_port,6,optional" frugal:"6,optional,i32" json:"http_port,omitempty"` - HeartbeatFlags *int64 `thrift:"heartbeat_flags,7,optional" frugal:"7,optional,i64" 
json:"heartbeat_flags,omitempty"` - BackendId *int64 `thrift:"backend_id,8,optional" frugal:"8,optional,i64" json:"backend_id,omitempty"` - FrontendInfos []*TFrontendInfo `thrift:"frontend_infos,9,optional" frugal:"9,optional,list" json:"frontend_infos,omitempty"` + NetworkAddress *types.TNetworkAddress `thrift:"network_address,1,required" frugal:"1,required,types.TNetworkAddress" json:"network_address"` + ClusterId types.TClusterId `thrift:"cluster_id,2,required" frugal:"2,required,i32" json:"cluster_id"` + Epoch types.TEpoch `thrift:"epoch,3,required" frugal:"3,required,i64" json:"epoch"` + Token *string `thrift:"token,4,optional" frugal:"4,optional,string" json:"token,omitempty"` + BackendIp *string `thrift:"backend_ip,5,optional" frugal:"5,optional,string" json:"backend_ip,omitempty"` + HttpPort *types.TPort `thrift:"http_port,6,optional" frugal:"6,optional,i32" json:"http_port,omitempty"` + HeartbeatFlags *int64 `thrift:"heartbeat_flags,7,optional" frugal:"7,optional,i64" json:"heartbeat_flags,omitempty"` + BackendId *int64 `thrift:"backend_id,8,optional" frugal:"8,optional,i64" json:"backend_id,omitempty"` + FrontendInfos []*TFrontendInfo `thrift:"frontend_infos,9,optional" frugal:"9,optional,list" json:"frontend_infos,omitempty"` + MetaServiceEndpoint *string `thrift:"meta_service_endpoint,10,optional" frugal:"10,optional,string" json:"meta_service_endpoint,omitempty"` + CloudUniqueId *string `thrift:"cloud_unique_id,11,optional" frugal:"11,optional,string" json:"cloud_unique_id,omitempty"` } func NewTMasterInfo() *TMasterInfo { @@ -350,6 +352,24 @@ func (p *TMasterInfo) GetFrontendInfos() (v []*TFrontendInfo) { } return p.FrontendInfos } + +var TMasterInfo_MetaServiceEndpoint_DEFAULT string + +func (p *TMasterInfo) GetMetaServiceEndpoint() (v string) { + if !p.IsSetMetaServiceEndpoint() { + return TMasterInfo_MetaServiceEndpoint_DEFAULT + } + return *p.MetaServiceEndpoint +} + +var TMasterInfo_CloudUniqueId_DEFAULT string + +func (p *TMasterInfo) 
GetCloudUniqueId() (v string) { + if !p.IsSetCloudUniqueId() { + return TMasterInfo_CloudUniqueId_DEFAULT + } + return *p.CloudUniqueId +} func (p *TMasterInfo) SetNetworkAddress(val *types.TNetworkAddress) { p.NetworkAddress = val } @@ -377,17 +397,25 @@ func (p *TMasterInfo) SetBackendId(val *int64) { func (p *TMasterInfo) SetFrontendInfos(val []*TFrontendInfo) { p.FrontendInfos = val } +func (p *TMasterInfo) SetMetaServiceEndpoint(val *string) { + p.MetaServiceEndpoint = val +} +func (p *TMasterInfo) SetCloudUniqueId(val *string) { + p.CloudUniqueId = val +} var fieldIDToName_TMasterInfo = map[int16]string{ - 1: "network_address", - 2: "cluster_id", - 3: "epoch", - 4: "token", - 5: "backend_ip", - 6: "http_port", - 7: "heartbeat_flags", - 8: "backend_id", - 9: "frontend_infos", + 1: "network_address", + 2: "cluster_id", + 3: "epoch", + 4: "token", + 5: "backend_ip", + 6: "http_port", + 7: "heartbeat_flags", + 8: "backend_id", + 9: "frontend_infos", + 10: "meta_service_endpoint", + 11: "cloud_unique_id", } func (p *TMasterInfo) IsSetNetworkAddress() bool { @@ -418,6 +446,14 @@ func (p *TMasterInfo) IsSetFrontendInfos() bool { return p.FrontendInfos != nil } +func (p *TMasterInfo) IsSetMetaServiceEndpoint() bool { + return p.MetaServiceEndpoint != nil +} + +func (p *TMasterInfo) IsSetCloudUniqueId() bool { + return p.CloudUniqueId != nil +} + func (p *TMasterInfo) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -515,6 +551,22 @@ func (p *TMasterInfo) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 10: + if fieldTypeId == thrift.STRING { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRING { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + 
goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -668,6 +720,28 @@ func (p *TMasterInfo) ReadField9(iprot thrift.TProtocol) error { p.FrontendInfos = _field return nil } +func (p *TMasterInfo) ReadField10(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.MetaServiceEndpoint = _field + return nil +} +func (p *TMasterInfo) ReadField11(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CloudUniqueId = _field + return nil +} func (p *TMasterInfo) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 @@ -711,6 +785,14 @@ func (p *TMasterInfo) Write(oprot thrift.TProtocol) (err error) { fieldId = 9 goto WriteFieldError } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -902,6 +984,44 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) } +func (p *TMasterInfo) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaServiceEndpoint() { + if err = oprot.WriteFieldBegin("meta_service_endpoint", thrift.STRING, 10); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.MetaServiceEndpoint); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} + +func (p *TMasterInfo) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetCloudUniqueId() { + if err = 
oprot.WriteFieldBegin("cloud_unique_id", thrift.STRING, 11); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CloudUniqueId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) +} + func (p *TMasterInfo) String() string { if p == nil { return "" @@ -943,6 +1063,12 @@ func (p *TMasterInfo) DeepEqual(ano *TMasterInfo) bool { if !p.Field9DeepEqual(ano.FrontendInfos) { return false } + if !p.Field10DeepEqual(ano.MetaServiceEndpoint) { + return false + } + if !p.Field11DeepEqual(ano.CloudUniqueId) { + return false + } return true } @@ -1040,6 +1166,30 @@ func (p *TMasterInfo) Field9DeepEqual(src []*TFrontendInfo) bool { } return true } +func (p *TMasterInfo) Field10DeepEqual(src *string) bool { + + if p.MetaServiceEndpoint == src { + return true + } else if p.MetaServiceEndpoint == nil || src == nil { + return false + } + if strings.Compare(*p.MetaServiceEndpoint, *src) != 0 { + return false + } + return true +} +func (p *TMasterInfo) Field11DeepEqual(src *string) bool { + + if p.CloudUniqueId == src { + return true + } else if p.CloudUniqueId == nil || src == nil { + return false + } + if strings.Compare(*p.CloudUniqueId, *src) != 0 { + return false + } + return true +} type TBackendInfo struct { BePort types.TPort `thrift:"be_port,1,required" frugal:"1,required,i32" json:"be_port"` diff --git a/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go b/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go index 349de0d8..224ad205 100644 --- a/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go +++ b/pkg/rpc/kitex_gen/heartbeatservice/k-HeartbeatService.go @@ -366,6 +366,34 @@ func (p *TMasterInfo) FastRead(buf []byte) (int, error) { goto SkipFieldError 
} } + case 10: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -550,6 +578,32 @@ func (p *TMasterInfo) FastReadField9(buf []byte) (int, error) { return offset, nil } +func (p *TMasterInfo) FastReadField10(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.MetaServiceEndpoint = &v + + } + return offset, nil +} + +func (p *TMasterInfo) FastReadField11(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.CloudUniqueId = &v + + } + return offset, nil +} + // for compatibility func (p *TMasterInfo) FastWrite(buf []byte) int { return 0 @@ -568,6 +622,8 @@ func (p *TMasterInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWri offset += p.fastWriteField4(buf[offset:], binaryWriter) offset += p.fastWriteField5(buf[offset:], binaryWriter) offset += p.fastWriteField9(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -587,6 +643,8 @@ func (p *TMasterInfo) BLength() int { l += p.field7Length() l += p.field8Length() l += p.field9Length() + l += p.field10Length() + l += 
p.field11Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -692,6 +750,28 @@ func (p *TMasterInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWri return offset } +func (p *TMasterInfo) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetMetaServiceEndpoint() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "meta_service_endpoint", thrift.STRING, 10) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.MetaServiceEndpoint) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMasterInfo) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetCloudUniqueId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_unique_id", thrift.STRING, 11) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudUniqueId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMasterInfo) field1Length() int { l := 0 l += bthrift.Binary.FieldBeginLength("network_address", thrift.STRUCT, 1) @@ -787,6 +867,28 @@ func (p *TMasterInfo) field9Length() int { return l } +func (p *TMasterInfo) field10Length() int { + l := 0 + if p.IsSetMetaServiceEndpoint() { + l += bthrift.Binary.FieldBeginLength("meta_service_endpoint", thrift.STRING, 10) + l += bthrift.Binary.StringLengthNocopy(*p.MetaServiceEndpoint) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMasterInfo) field11Length() int { + l := 0 + if p.IsSetCloudUniqueId() { + l += bthrift.Binary.FieldBeginLength("cloud_unique_id", thrift.STRING, 11) + l += bthrift.Binary.StringLengthNocopy(*p.CloudUniqueId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TBackendInfo) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go 
b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go index 22e1596c..04e9b1e6 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/PaloInternalService.go @@ -1753,6 +1753,9 @@ type TQueryOptions struct { RpcVerboseProfileMaxInstanceCount int32 `thrift:"rpc_verbose_profile_max_instance_count,129,optional" frugal:"129,optional,i32" json:"rpc_verbose_profile_max_instance_count,omitempty"` EnableAdaptivePipelineTaskSerialReadOnLimit bool `thrift:"enable_adaptive_pipeline_task_serial_read_on_limit,130,optional" frugal:"130,optional,bool" json:"enable_adaptive_pipeline_task_serial_read_on_limit,omitempty"` AdaptivePipelineTaskSerialReadOnLimit int32 `thrift:"adaptive_pipeline_task_serial_read_on_limit,131,optional" frugal:"131,optional,i32" json:"adaptive_pipeline_task_serial_read_on_limit,omitempty"` + ParallelPrepareThreshold int32 `thrift:"parallel_prepare_threshold,132,optional" frugal:"132,optional,i32" json:"parallel_prepare_threshold,omitempty"` + PartitionTopnMaxPartitions int32 `thrift:"partition_topn_max_partitions,133,optional" frugal:"133,optional,i32" json:"partition_topn_max_partitions,omitempty"` + PartitionTopnPrePartitionRows int32 `thrift:"partition_topn_pre_partition_rows,134,optional" frugal:"134,optional,i32" json:"partition_topn_pre_partition_rows,omitempty"` DisableFileCache bool `thrift:"disable_file_cache,1000,optional" frugal:"1000,optional,bool" json:"disable_file_cache,omitempty"` } @@ -1870,6 +1873,9 @@ func NewTQueryOptions() *TQueryOptions { RpcVerboseProfileMaxInstanceCount: 0, EnableAdaptivePipelineTaskSerialReadOnLimit: true, AdaptivePipelineTaskSerialReadOnLimit: 10000, + ParallelPrepareThreshold: 0, + PartitionTopnMaxPartitions: 1024, + PartitionTopnPrePartitionRows: 1000, DisableFileCache: false, } } @@ -1986,6 +1992,9 @@ func (p *TQueryOptions) InitDefault() { p.RpcVerboseProfileMaxInstanceCount = 0 p.EnableAdaptivePipelineTaskSerialReadOnLimit = true 
p.AdaptivePipelineTaskSerialReadOnLimit = 10000 + p.ParallelPrepareThreshold = 0 + p.PartitionTopnMaxPartitions = 1024 + p.PartitionTopnPrePartitionRows = 1000 p.DisableFileCache = false } @@ -3087,6 +3096,33 @@ func (p *TQueryOptions) GetAdaptivePipelineTaskSerialReadOnLimit() (v int32) { return p.AdaptivePipelineTaskSerialReadOnLimit } +var TQueryOptions_ParallelPrepareThreshold_DEFAULT int32 = 0 + +func (p *TQueryOptions) GetParallelPrepareThreshold() (v int32) { + if !p.IsSetParallelPrepareThreshold() { + return TQueryOptions_ParallelPrepareThreshold_DEFAULT + } + return p.ParallelPrepareThreshold +} + +var TQueryOptions_PartitionTopnMaxPartitions_DEFAULT int32 = 1024 + +func (p *TQueryOptions) GetPartitionTopnMaxPartitions() (v int32) { + if !p.IsSetPartitionTopnMaxPartitions() { + return TQueryOptions_PartitionTopnMaxPartitions_DEFAULT + } + return p.PartitionTopnMaxPartitions +} + +var TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT int32 = 1000 + +func (p *TQueryOptions) GetPartitionTopnPrePartitionRows() (v int32) { + if !p.IsSetPartitionTopnPrePartitionRows() { + return TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT + } + return p.PartitionTopnPrePartitionRows +} + var TQueryOptions_DisableFileCache_DEFAULT bool = false func (p *TQueryOptions) GetDisableFileCache() (v bool) { @@ -3461,6 +3497,15 @@ func (p *TQueryOptions) SetEnableAdaptivePipelineTaskSerialReadOnLimit(val bool) func (p *TQueryOptions) SetAdaptivePipelineTaskSerialReadOnLimit(val int32) { p.AdaptivePipelineTaskSerialReadOnLimit = val } +func (p *TQueryOptions) SetParallelPrepareThreshold(val int32) { + p.ParallelPrepareThreshold = val +} +func (p *TQueryOptions) SetPartitionTopnMaxPartitions(val int32) { + p.PartitionTopnMaxPartitions = val +} +func (p *TQueryOptions) SetPartitionTopnPrePartitionRows(val int32) { + p.PartitionTopnPrePartitionRows = val +} func (p *TQueryOptions) SetDisableFileCache(val bool) { p.DisableFileCache = val } @@ -3588,6 +3633,9 @@ var 
fieldIDToName_TQueryOptions = map[int16]string{ 129: "rpc_verbose_profile_max_instance_count", 130: "enable_adaptive_pipeline_task_serial_read_on_limit", 131: "adaptive_pipeline_task_serial_read_on_limit", + 132: "parallel_prepare_threshold", + 133: "partition_topn_max_partitions", + 134: "partition_topn_pre_partition_rows", 1000: "disable_file_cache", } @@ -4079,6 +4127,18 @@ func (p *TQueryOptions) IsSetAdaptivePipelineTaskSerialReadOnLimit() bool { return p.AdaptivePipelineTaskSerialReadOnLimit != TQueryOptions_AdaptivePipelineTaskSerialReadOnLimit_DEFAULT } +func (p *TQueryOptions) IsSetParallelPrepareThreshold() bool { + return p.ParallelPrepareThreshold != TQueryOptions_ParallelPrepareThreshold_DEFAULT +} + +func (p *TQueryOptions) IsSetPartitionTopnMaxPartitions() bool { + return p.PartitionTopnMaxPartitions != TQueryOptions_PartitionTopnMaxPartitions_DEFAULT +} + +func (p *TQueryOptions) IsSetPartitionTopnPrePartitionRows() bool { + return p.PartitionTopnPrePartitionRows != TQueryOptions_PartitionTopnPrePartitionRows_DEFAULT +} + func (p *TQueryOptions) IsSetDisableFileCache() bool { return p.DisableFileCache != TQueryOptions_DisableFileCache_DEFAULT } @@ -5078,6 +5138,30 @@ func (p *TQueryOptions) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 132: + if fieldTypeId == thrift.I32 { + if err = p.ReadField132(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 133: + if fieldTypeId == thrift.I32 { + if err = p.ReadField133(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 134: + if fieldTypeId == thrift.I32 { + if err = p.ReadField134(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 1000: if fieldTypeId == thrift.BOOL { if err = 
p.ReadField1000(iprot); err != nil { @@ -6454,6 +6538,39 @@ func (p *TQueryOptions) ReadField131(iprot thrift.TProtocol) error { p.AdaptivePipelineTaskSerialReadOnLimit = _field return nil } +func (p *TQueryOptions) ReadField132(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.ParallelPrepareThreshold = _field + return nil +} +func (p *TQueryOptions) ReadField133(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PartitionTopnMaxPartitions = _field + return nil +} +func (p *TQueryOptions) ReadField134(iprot thrift.TProtocol) error { + + var _field int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = v + } + p.PartitionTopnPrePartitionRows = _field + return nil +} func (p *TQueryOptions) ReadField1000(iprot thrift.TProtocol) error { var _field bool @@ -6960,6 +7077,18 @@ func (p *TQueryOptions) Write(oprot thrift.TProtocol) (err error) { fieldId = 131 goto WriteFieldError } + if err = p.writeField132(oprot); err != nil { + fieldId = 132 + goto WriteFieldError + } + if err = p.writeField133(oprot); err != nil { + fieldId = 133 + goto WriteFieldError + } + if err = p.writeField134(oprot); err != nil { + fieldId = 134 + goto WriteFieldError + } if err = p.writeField1000(oprot); err != nil { fieldId = 1000 goto WriteFieldError @@ -9300,6 +9429,63 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 131 end error: ", p), err) } +func (p *TQueryOptions) writeField132(oprot thrift.TProtocol) (err error) { + if p.IsSetParallelPrepareThreshold() { + if err = oprot.WriteFieldBegin("parallel_prepare_threshold", thrift.I32, 132); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.ParallelPrepareThreshold); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } 
+ return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 132 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 132 end error: ", p), err) +} + +func (p *TQueryOptions) writeField133(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionTopnMaxPartitions() { + if err = oprot.WriteFieldBegin("partition_topn_max_partitions", thrift.I32, 133); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.PartitionTopnMaxPartitions); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 133 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 133 end error: ", p), err) +} + +func (p *TQueryOptions) writeField134(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionTopnPrePartitionRows() { + if err = oprot.WriteFieldBegin("partition_topn_pre_partition_rows", thrift.I32, 134); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(p.PartitionTopnPrePartitionRows); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 134 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 134 end error: ", p), err) +} + func (p *TQueryOptions) writeField1000(oprot thrift.TProtocol) (err error) { if p.IsSetDisableFileCache() { if err = oprot.WriteFieldBegin("disable_file_cache", thrift.BOOL, 1000); err != nil { @@ -9699,6 +9885,15 @@ func (p *TQueryOptions) DeepEqual(ano *TQueryOptions) bool { if !p.Field131DeepEqual(ano.AdaptivePipelineTaskSerialReadOnLimit) { return false } + if !p.Field132DeepEqual(ano.ParallelPrepareThreshold) { + return false + } + if 
!p.Field133DeepEqual(ano.PartitionTopnMaxPartitions) { + return false + } + if !p.Field134DeepEqual(ano.PartitionTopnPrePartitionRows) { + return false + } if !p.Field1000DeepEqual(ano.DisableFileCache) { return false } @@ -10609,6 +10804,27 @@ func (p *TQueryOptions) Field131DeepEqual(src int32) bool { } return true } +func (p *TQueryOptions) Field132DeepEqual(src int32) bool { + + if p.ParallelPrepareThreshold != src { + return false + } + return true +} +func (p *TQueryOptions) Field133DeepEqual(src int32) bool { + + if p.PartitionTopnMaxPartitions != src { + return false + } + return true +} +func (p *TQueryOptions) Field134DeepEqual(src int32) bool { + + if p.PartitionTopnPrePartitionRows != src { + return false + } + return true +} func (p *TQueryOptions) Field1000DeepEqual(src bool) bool { if p.DisableFileCache != src { @@ -25652,6 +25868,7 @@ type TPipelineFragmentParams struct { WalId *int64 `thrift:"wal_id,41,optional" frugal:"41,optional,i64" json:"wal_id,omitempty"` ContentLength *int64 `thrift:"content_length,42,optional" frugal:"42,optional,i64" json:"content_length,omitempty"` CurrentConnectFe *types.TNetworkAddress `thrift:"current_connect_fe,43,optional" frugal:"43,optional,types.TNetworkAddress" json:"current_connect_fe,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,44,optional" frugal:"44,optional,list" json:"topn_filter_source_node_ids,omitempty"` IsMowTable *bool `thrift:"is_mow_table,1000,optional" frugal:"1000,optional,bool" json:"is_mow_table,omitempty"` } @@ -26030,6 +26247,15 @@ func (p *TPipelineFragmentParams) GetCurrentConnectFe() (v *types.TNetworkAddres return p.CurrentConnectFe } +var TPipelineFragmentParams_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPipelineFragmentParams) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPipelineFragmentParams_TopnFilterSourceNodeIds_DEFAULT + } + return p.TopnFilterSourceNodeIds +} + var 
TPipelineFragmentParams_IsMowTable_DEFAULT bool func (p *TPipelineFragmentParams) GetIsMowTable() (v bool) { @@ -26164,6 +26390,9 @@ func (p *TPipelineFragmentParams) SetContentLength(val *int64) { func (p *TPipelineFragmentParams) SetCurrentConnectFe(val *types.TNetworkAddress) { p.CurrentConnectFe = val } +func (p *TPipelineFragmentParams) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} func (p *TPipelineFragmentParams) SetIsMowTable(val *bool) { p.IsMowTable = val } @@ -26211,6 +26440,7 @@ var fieldIDToName_TPipelineFragmentParams = map[int16]string{ 41: "wal_id", 42: "content_length", 43: "current_connect_fe", + 44: "topn_filter_source_node_ids", 1000: "is_mow_table", } @@ -26366,6 +26596,10 @@ func (p *TPipelineFragmentParams) IsSetCurrentConnectFe() bool { return p.CurrentConnectFe != nil } +func (p *TPipelineFragmentParams) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} + func (p *TPipelineFragmentParams) IsSetIsMowTable() bool { return p.IsMowTable != nil } @@ -26731,6 +26965,14 @@ func (p *TPipelineFragmentParams) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 44: + if fieldTypeId == thrift.LIST { + if err = p.ReadField44(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 1000: if fieldTypeId == thrift.BOOL { if err = p.ReadField1000(iprot); err != nil { @@ -27351,6 +27593,29 @@ func (p *TPipelineFragmentParams) ReadField43(iprot thrift.TProtocol) error { p.CurrentConnectFe = _field return nil } +func (p *TPipelineFragmentParams) ReadField44(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = 
append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field + return nil +} func (p *TPipelineFragmentParams) ReadField1000(iprot thrift.TProtocol) error { var _field *bool @@ -27537,6 +27802,10 @@ func (p *TPipelineFragmentParams) Write(oprot thrift.TProtocol) (err error) { fieldId = 43 goto WriteFieldError } + if err = p.writeField44(oprot); err != nil { + fieldId = 44 + goto WriteFieldError + } if err = p.writeField1000(oprot); err != nil { fieldId = 1000 goto WriteFieldError @@ -28434,6 +28703,33 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 43 end error: ", p), err) } +func (p *TPipelineFragmentParams) writeField44(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 44); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { + return err + } + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 44 end error: ", p), err) +} + func (p *TPipelineFragmentParams) writeField1000(oprot thrift.TProtocol) (err error) { if p.IsSetIsMowTable() { if err = oprot.WriteFieldBegin("is_mow_table", thrift.BOOL, 1000); err != nil { @@ -28593,6 +28889,9 @@ func (p *TPipelineFragmentParams) DeepEqual(ano *TPipelineFragmentParams) bool { if !p.Field43DeepEqual(ano.CurrentConnectFe) { return false } + if !p.Field44DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } if 
!p.Field1000DeepEqual(ano.IsMowTable) { return false } @@ -29032,6 +29331,19 @@ func (p *TPipelineFragmentParams) Field43DeepEqual(src *types.TNetworkAddress) b } return true } +func (p *TPipelineFragmentParams) Field44DeepEqual(src []int32) bool { + + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false + } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} func (p *TPipelineFragmentParams) Field1000DeepEqual(src *bool) bool { if p.IsMowTable == src { @@ -29046,14 +29358,30 @@ func (p *TPipelineFragmentParams) Field1000DeepEqual(src *bool) bool { } type TPipelineFragmentParamsList struct { - ParamsList []*TPipelineFragmentParams `thrift:"params_list,1,optional" frugal:"1,optional,list" json:"params_list,omitempty"` + ParamsList []*TPipelineFragmentParams `thrift:"params_list,1,optional" frugal:"1,optional,list" json:"params_list,omitempty"` + DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,2,optional" frugal:"2,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"` + FileScanParams map[types.TPlanNodeId]*plannodes.TFileScanRangeParams `thrift:"file_scan_params,3,optional" frugal:"3,optional,map" json:"file_scan_params,omitempty"` + Coord *types.TNetworkAddress `thrift:"coord,4,optional" frugal:"4,optional,types.TNetworkAddress" json:"coord,omitempty"` + QueryGlobals *TQueryGlobals `thrift:"query_globals,5,optional" frugal:"5,optional,TQueryGlobals" json:"query_globals,omitempty"` + ResourceInfo *types.TResourceInfo `thrift:"resource_info,6,optional" frugal:"6,optional,types.TResourceInfo" json:"resource_info,omitempty"` + FragmentNumOnHost *int32 `thrift:"fragment_num_on_host,7,optional" frugal:"7,optional,i32" json:"fragment_num_on_host,omitempty"` + QueryOptions *TQueryOptions `thrift:"query_options,8,optional" frugal:"8,optional,TQueryOptions" json:"query_options,omitempty"` + IsNereids bool `thrift:"is_nereids,9,optional" frugal:"9,optional,bool" 
json:"is_nereids,omitempty"` + WorkloadGroups []*TPipelineWorkloadGroup `thrift:"workload_groups,10,optional" frugal:"10,optional,list" json:"workload_groups,omitempty"` + QueryId *types.TUniqueId `thrift:"query_id,11,optional" frugal:"11,optional,types.TUniqueId" json:"query_id,omitempty"` + TopnFilterSourceNodeIds []int32 `thrift:"topn_filter_source_node_ids,12,optional" frugal:"12,optional,list" json:"topn_filter_source_node_ids,omitempty"` + RuntimeFilterMergeAddr *types.TNetworkAddress `thrift:"runtime_filter_merge_addr,13,optional" frugal:"13,optional,types.TNetworkAddress" json:"runtime_filter_merge_addr,omitempty"` } func NewTPipelineFragmentParamsList() *TPipelineFragmentParamsList { - return &TPipelineFragmentParamsList{} + return &TPipelineFragmentParamsList{ + + IsNereids: true, + } } func (p *TPipelineFragmentParamsList) InitDefault() { + p.IsNereids = true } var TPipelineFragmentParamsList_ParamsList_DEFAULT []*TPipelineFragmentParams @@ -29064,38 +29392,242 @@ func (p *TPipelineFragmentParamsList) GetParamsList() (v []*TPipelineFragmentPar } return p.ParamsList } -func (p *TPipelineFragmentParamsList) SetParamsList(val []*TPipelineFragmentParams) { - p.ParamsList = val -} -var fieldIDToName_TPipelineFragmentParamsList = map[int16]string{ - 1: "params_list", +var TPipelineFragmentParamsList_DescTbl_DEFAULT *descriptors.TDescriptorTable + +func (p *TPipelineFragmentParamsList) GetDescTbl() (v *descriptors.TDescriptorTable) { + if !p.IsSetDescTbl() { + return TPipelineFragmentParamsList_DescTbl_DEFAULT + } + return p.DescTbl } -func (p *TPipelineFragmentParamsList) IsSetParamsList() bool { - return p.ParamsList != nil +var TPipelineFragmentParamsList_FileScanParams_DEFAULT map[types.TPlanNodeId]*plannodes.TFileScanRangeParams + +func (p *TPipelineFragmentParamsList) GetFileScanParams() (v map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + if !p.IsSetFileScanParams() { + return TPipelineFragmentParamsList_FileScanParams_DEFAULT + } + return 
p.FileScanParams } -func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { +var TPipelineFragmentParamsList_Coord_DEFAULT *types.TNetworkAddress - var fieldTypeId thrift.TType - var fieldId int16 +func (p *TPipelineFragmentParamsList) GetCoord() (v *types.TNetworkAddress) { + if !p.IsSetCoord() { + return TPipelineFragmentParamsList_Coord_DEFAULT + } + return p.Coord +} - if _, err = iprot.ReadStructBegin(); err != nil { - goto ReadStructBeginError +var TPipelineFragmentParamsList_QueryGlobals_DEFAULT *TQueryGlobals + +func (p *TPipelineFragmentParamsList) GetQueryGlobals() (v *TQueryGlobals) { + if !p.IsSetQueryGlobals() { + return TPipelineFragmentParamsList_QueryGlobals_DEFAULT } + return p.QueryGlobals +} - for { - _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() - if err != nil { - goto ReadFieldBeginError - } - if fieldTypeId == thrift.STOP { - break - } +var TPipelineFragmentParamsList_ResourceInfo_DEFAULT *types.TResourceInfo - switch fieldId { - case 1: +func (p *TPipelineFragmentParamsList) GetResourceInfo() (v *types.TResourceInfo) { + if !p.IsSetResourceInfo() { + return TPipelineFragmentParamsList_ResourceInfo_DEFAULT + } + return p.ResourceInfo +} + +var TPipelineFragmentParamsList_FragmentNumOnHost_DEFAULT int32 + +func (p *TPipelineFragmentParamsList) GetFragmentNumOnHost() (v int32) { + if !p.IsSetFragmentNumOnHost() { + return TPipelineFragmentParamsList_FragmentNumOnHost_DEFAULT + } + return *p.FragmentNumOnHost +} + +var TPipelineFragmentParamsList_QueryOptions_DEFAULT *TQueryOptions + +func (p *TPipelineFragmentParamsList) GetQueryOptions() (v *TQueryOptions) { + if !p.IsSetQueryOptions() { + return TPipelineFragmentParamsList_QueryOptions_DEFAULT + } + return p.QueryOptions +} + +var TPipelineFragmentParamsList_IsNereids_DEFAULT bool = true + +func (p *TPipelineFragmentParamsList) GetIsNereids() (v bool) { + if !p.IsSetIsNereids() { + return TPipelineFragmentParamsList_IsNereids_DEFAULT + } + return p.IsNereids 
+} + +var TPipelineFragmentParamsList_WorkloadGroups_DEFAULT []*TPipelineWorkloadGroup + +func (p *TPipelineFragmentParamsList) GetWorkloadGroups() (v []*TPipelineWorkloadGroup) { + if !p.IsSetWorkloadGroups() { + return TPipelineFragmentParamsList_WorkloadGroups_DEFAULT + } + return p.WorkloadGroups +} + +var TPipelineFragmentParamsList_QueryId_DEFAULT *types.TUniqueId + +func (p *TPipelineFragmentParamsList) GetQueryId() (v *types.TUniqueId) { + if !p.IsSetQueryId() { + return TPipelineFragmentParamsList_QueryId_DEFAULT + } + return p.QueryId +} + +var TPipelineFragmentParamsList_TopnFilterSourceNodeIds_DEFAULT []int32 + +func (p *TPipelineFragmentParamsList) GetTopnFilterSourceNodeIds() (v []int32) { + if !p.IsSetTopnFilterSourceNodeIds() { + return TPipelineFragmentParamsList_TopnFilterSourceNodeIds_DEFAULT + } + return p.TopnFilterSourceNodeIds +} + +var TPipelineFragmentParamsList_RuntimeFilterMergeAddr_DEFAULT *types.TNetworkAddress + +func (p *TPipelineFragmentParamsList) GetRuntimeFilterMergeAddr() (v *types.TNetworkAddress) { + if !p.IsSetRuntimeFilterMergeAddr() { + return TPipelineFragmentParamsList_RuntimeFilterMergeAddr_DEFAULT + } + return p.RuntimeFilterMergeAddr +} +func (p *TPipelineFragmentParamsList) SetParamsList(val []*TPipelineFragmentParams) { + p.ParamsList = val +} +func (p *TPipelineFragmentParamsList) SetDescTbl(val *descriptors.TDescriptorTable) { + p.DescTbl = val +} +func (p *TPipelineFragmentParamsList) SetFileScanParams(val map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) { + p.FileScanParams = val +} +func (p *TPipelineFragmentParamsList) SetCoord(val *types.TNetworkAddress) { + p.Coord = val +} +func (p *TPipelineFragmentParamsList) SetQueryGlobals(val *TQueryGlobals) { + p.QueryGlobals = val +} +func (p *TPipelineFragmentParamsList) SetResourceInfo(val *types.TResourceInfo) { + p.ResourceInfo = val +} +func (p *TPipelineFragmentParamsList) SetFragmentNumOnHost(val *int32) { + p.FragmentNumOnHost = val +} +func (p 
*TPipelineFragmentParamsList) SetQueryOptions(val *TQueryOptions) { + p.QueryOptions = val +} +func (p *TPipelineFragmentParamsList) SetIsNereids(val bool) { + p.IsNereids = val +} +func (p *TPipelineFragmentParamsList) SetWorkloadGroups(val []*TPipelineWorkloadGroup) { + p.WorkloadGroups = val +} +func (p *TPipelineFragmentParamsList) SetQueryId(val *types.TUniqueId) { + p.QueryId = val +} +func (p *TPipelineFragmentParamsList) SetTopnFilterSourceNodeIds(val []int32) { + p.TopnFilterSourceNodeIds = val +} +func (p *TPipelineFragmentParamsList) SetRuntimeFilterMergeAddr(val *types.TNetworkAddress) { + p.RuntimeFilterMergeAddr = val +} + +var fieldIDToName_TPipelineFragmentParamsList = map[int16]string{ + 1: "params_list", + 2: "desc_tbl", + 3: "file_scan_params", + 4: "coord", + 5: "query_globals", + 6: "resource_info", + 7: "fragment_num_on_host", + 8: "query_options", + 9: "is_nereids", + 10: "workload_groups", + 11: "query_id", + 12: "topn_filter_source_node_ids", + 13: "runtime_filter_merge_addr", +} + +func (p *TPipelineFragmentParamsList) IsSetParamsList() bool { + return p.ParamsList != nil +} + +func (p *TPipelineFragmentParamsList) IsSetDescTbl() bool { + return p.DescTbl != nil +} + +func (p *TPipelineFragmentParamsList) IsSetFileScanParams() bool { + return p.FileScanParams != nil +} + +func (p *TPipelineFragmentParamsList) IsSetCoord() bool { + return p.Coord != nil +} + +func (p *TPipelineFragmentParamsList) IsSetQueryGlobals() bool { + return p.QueryGlobals != nil +} + +func (p *TPipelineFragmentParamsList) IsSetResourceInfo() bool { + return p.ResourceInfo != nil +} + +func (p *TPipelineFragmentParamsList) IsSetFragmentNumOnHost() bool { + return p.FragmentNumOnHost != nil +} + +func (p *TPipelineFragmentParamsList) IsSetQueryOptions() bool { + return p.QueryOptions != nil +} + +func (p *TPipelineFragmentParamsList) IsSetIsNereids() bool { + return p.IsNereids != TPipelineFragmentParamsList_IsNereids_DEFAULT +} + +func (p 
*TPipelineFragmentParamsList) IsSetWorkloadGroups() bool { + return p.WorkloadGroups != nil +} + +func (p *TPipelineFragmentParamsList) IsSetQueryId() bool { + return p.QueryId != nil +} + +func (p *TPipelineFragmentParamsList) IsSetTopnFilterSourceNodeIds() bool { + return p.TopnFilterSourceNodeIds != nil +} + +func (p *TPipelineFragmentParamsList) IsSetRuntimeFilterMergeAddr() bool { + return p.RuntimeFilterMergeAddr != nil +} + +func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { + + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: if fieldTypeId == thrift.LIST { if err = p.ReadField1(iprot); err != nil { goto ReadFieldError @@ -29103,6 +29635,102 @@ func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.MAP { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); 
err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I32 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.LIST { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField11(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 12: + if fieldTypeId == thrift.LIST { + if err = p.ReadField12(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 13: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField13(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -29115,85 +29743,522 @@ func (p *TPipelineFragmentParamsList) Read(iprot thrift.TProtocol) (err error) { if err = iprot.ReadStructEnd(); err != nil { goto ReadStructEndError } - + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), 
err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineFragmentParams, 0, size) + values := make([]TPipelineFragmentParams, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ParamsList = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField2(iprot thrift.TProtocol) error { + _field := descriptors.NewTDescriptorTable() + if err := _field.Read(iprot); err != nil { + return err + } + p.DescTbl = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return err + } + _field := make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + values := make([]plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _key = v + } + + _val := &values[i] + _val.InitDefault() + if err := _val.Read(iprot); err != nil { + return err + } + + _field[_key] = _val + } + if err := iprot.ReadMapEnd(); err != nil { + return err + } + p.FileScanParams = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField4(iprot 
thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.Coord = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField5(iprot thrift.TProtocol) error { + _field := NewTQueryGlobals() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryGlobals = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField6(iprot thrift.TProtocol) error { + _field := types.NewTResourceInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.ResourceInfo = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField7(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.FragmentNumOnHost = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField8(iprot thrift.TProtocol) error { + _field := NewTQueryOptions() + if err := _field.Read(iprot); err != nil { + return err + } + p.QueryOptions = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField9(iprot thrift.TProtocol) error { + + var _field bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = v + } + p.IsNereids = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TPipelineWorkloadGroup, 0, size) + values := make([]TPipelineWorkloadGroup, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.WorkloadGroups = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField11(iprot thrift.TProtocol) error { + _field := types.NewTUniqueId() + if err := _field.Read(iprot); err != nil { + return 
err + } + p.QueryId = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField12(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]int32, 0, size) + for i := 0; i < size; i++ { + + var _elem int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.TopnFilterSourceNodeIds = _field + return nil +} +func (p *TPipelineFragmentParamsList) ReadField13(iprot thrift.TProtocol) error { + _field := types.NewTNetworkAddress() + if err := _field.Read(iprot); err != nil { + return err + } + p.RuntimeFilterMergeAddr = _field + return nil +} + +func (p *TPipelineFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TPipelineFragmentParamsList"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField11(oprot); err != nil { + fieldId = 11 + goto WriteFieldError + } + if err = p.writeField12(oprot); err != 
nil { + fieldId = 12 + goto WriteFieldError + } + if err = p.writeField13(oprot); err != nil { + fieldId = 13 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetParamsList() { + if err = oprot.WriteFieldBegin("params_list", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { + return err + } + for _, v := range p.ParamsList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDescTbl() { + if err = oprot.WriteFieldBegin("desc_tbl", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.DescTbl.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin 
error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetFileScanParams() { + if err = oprot.WriteFieldBegin("file_scan_params", thrift.MAP, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(p.FileScanParams)); err != nil { + return err + } + for k, v := range p.FileScanParams { + if err := oprot.WriteI32(k); err != nil { + return err + } + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteMapEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetCoord() { + if err = oprot.WriteFieldBegin("coord", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.Coord.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryGlobals() { + if err = oprot.WriteFieldBegin("query_globals", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryGlobals.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil 
+WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetResourceInfo() { + if err = oprot.WriteFieldBegin("resource_info", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.ResourceInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFragmentNumOnHost() { + if err = oprot.WriteFieldBegin("fragment_num_on_host", thrift.I32, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.FragmentNumOnHost); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryOptions() { + if err = oprot.WriteFieldBegin("query_options", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryOptions.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } return nil -ReadStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) -SkipFieldError: - return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) - -ReadFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) } -func (p *TPipelineFragmentParamsList) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return err +func (p *TPipelineFragmentParamsList) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNereids() { + if err = oprot.WriteFieldBegin("is_nereids", thrift.BOOL, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(p.IsNereids); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - _field := make([]*TPipelineFragmentParams, 0, size) - values := make([]TPipelineFragmentParams, size) - for i := 0; i < size; i++ { - _elem := &values[i] - _elem.InitDefault() + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} - if err := _elem.Read(iprot); err != nil { +func (p *TPipelineFragmentParamsList) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkloadGroups() { + if err = oprot.WriteFieldBegin("workload_groups", thrift.LIST, 10); err != nil { + goto WriteFieldBeginError + } + if 
err := oprot.WriteListBegin(thrift.STRUCT, len(p.WorkloadGroups)); err != nil { return err } - - _field = append(_field, _elem) - } - if err := iprot.ReadListEnd(); err != nil { - return err + for _, v := range p.WorkloadGroups { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - p.ParamsList = _field return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) } -func (p *TPipelineFragmentParamsList) Write(oprot thrift.TProtocol) (err error) { - var fieldId int16 - if err = oprot.WriteStructBegin("TPipelineFragmentParamsList"); err != nil { - goto WriteStructBeginError - } - if p != nil { - if err = p.writeField1(oprot); err != nil { - fieldId = 1 - goto WriteFieldError +func (p *TPipelineFragmentParamsList) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryId() { + if err = oprot.WriteFieldBegin("query_id", thrift.STRUCT, 11); err != nil { + goto WriteFieldBeginError + } + if err := p.QueryId.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - } - if err = oprot.WriteFieldStop(); err != nil { - goto WriteFieldStopError - } - if err = oprot.WriteStructEnd(); err != nil { - goto WriteStructEndError } return nil -WriteStructBeginError: - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) -WriteFieldError: - return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) -WriteFieldStopError: - return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) -WriteStructEndError: - return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +WriteFieldBeginError: + return 
thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err) } -func (p *TPipelineFragmentParamsList) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetParamsList() { - if err = oprot.WriteFieldBegin("params_list", thrift.LIST, 1); err != nil { +func (p *TPipelineFragmentParamsList) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTopnFilterSourceNodeIds() { + if err = oprot.WriteFieldBegin("topn_filter_source_node_ids", thrift.LIST, 12); err != nil { goto WriteFieldBeginError } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ParamsList)); err != nil { + if err := oprot.WriteListBegin(thrift.I32, len(p.TopnFilterSourceNodeIds)); err != nil { return err } - for _, v := range p.ParamsList { - if err := v.Write(oprot); err != nil { + for _, v := range p.TopnFilterSourceNodeIds { + if err := oprot.WriteI32(v); err != nil { return err } } @@ -29206,9 +30271,28 @@ func (p *TPipelineFragmentParamsList) writeField1(oprot thrift.TProtocol) (err e } return nil WriteFieldBeginError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err) WriteFieldEndError: - return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) + return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetRuntimeFilterMergeAddr() { + if err = oprot.WriteFieldBegin("runtime_filter_merge_addr", thrift.STRUCT, 13); err != nil { + goto WriteFieldBeginError + } + if err := p.RuntimeFilterMergeAddr.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T 
write field 13 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err) } func (p *TPipelineFragmentParamsList) String() string { @@ -29228,6 +30312,42 @@ func (p *TPipelineFragmentParamsList) DeepEqual(ano *TPipelineFragmentParamsList if !p.Field1DeepEqual(ano.ParamsList) { return false } + if !p.Field2DeepEqual(ano.DescTbl) { + return false + } + if !p.Field3DeepEqual(ano.FileScanParams) { + return false + } + if !p.Field4DeepEqual(ano.Coord) { + return false + } + if !p.Field5DeepEqual(ano.QueryGlobals) { + return false + } + if !p.Field6DeepEqual(ano.ResourceInfo) { + return false + } + if !p.Field7DeepEqual(ano.FragmentNumOnHost) { + return false + } + if !p.Field8DeepEqual(ano.QueryOptions) { + return false + } + if !p.Field9DeepEqual(ano.IsNereids) { + return false + } + if !p.Field10DeepEqual(ano.WorkloadGroups) { + return false + } + if !p.Field11DeepEqual(ano.QueryId) { + return false + } + if !p.Field12DeepEqual(ano.TopnFilterSourceNodeIds) { + return false + } + if !p.Field13DeepEqual(ano.RuntimeFilterMergeAddr) { + return false + } return true } @@ -29244,3 +30364,110 @@ func (p *TPipelineFragmentParamsList) Field1DeepEqual(src []*TPipelineFragmentPa } return true } +func (p *TPipelineFragmentParamsList) Field2DeepEqual(src *descriptors.TDescriptorTable) bool { + + if !p.DescTbl.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field3DeepEqual(src map[types.TPlanNodeId]*plannodes.TFileScanRangeParams) bool { + + if len(p.FileScanParams) != len(src) { + return false + } + for k, v := range p.FileScanParams { + _src := src[k] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TPipelineFragmentParamsList) Field4DeepEqual(src *types.TNetworkAddress) bool { + + if !p.Coord.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field5DeepEqual(src *TQueryGlobals) bool { + + if 
!p.QueryGlobals.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field6DeepEqual(src *types.TResourceInfo) bool { + + if !p.ResourceInfo.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field7DeepEqual(src *int32) bool { + + if p.FragmentNumOnHost == src { + return true + } else if p.FragmentNumOnHost == nil || src == nil { + return false + } + if *p.FragmentNumOnHost != *src { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field8DeepEqual(src *TQueryOptions) bool { + + if !p.QueryOptions.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field9DeepEqual(src bool) bool { + + if p.IsNereids != src { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field10DeepEqual(src []*TPipelineWorkloadGroup) bool { + + if len(p.WorkloadGroups) != len(src) { + return false + } + for i, v := range p.WorkloadGroups { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TPipelineFragmentParamsList) Field11DeepEqual(src *types.TUniqueId) bool { + + if !p.QueryId.DeepEqual(src) { + return false + } + return true +} +func (p *TPipelineFragmentParamsList) Field12DeepEqual(src []int32) bool { + + if len(p.TopnFilterSourceNodeIds) != len(src) { + return false + } + for i, v := range p.TopnFilterSourceNodeIds { + _src := src[i] + if v != _src { + return false + } + } + return true +} +func (p *TPipelineFragmentParamsList) Field13DeepEqual(src *types.TNetworkAddress) bool { + + if !p.RuntimeFilterMergeAddr.DeepEqual(src) { + return false + } + return true +} diff --git a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go index 2c3ef1e5..71de4907 100644 --- a/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go +++ b/pkg/rpc/kitex_gen/palointernalservice/k-PaloInternalService.go @@ -2845,6 
+2845,48 @@ func (p *TQueryOptions) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 132: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField132(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 133: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField133(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 134: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField134(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 1000: if fieldTypeId == thrift.BOOL { l, err = p.FastReadField1000(buf[offset:]) @@ -4591,6 +4633,48 @@ func (p *TQueryOptions) FastReadField131(buf []byte) (int, error) { return offset, nil } +func (p *TQueryOptions) FastReadField132(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.ParallelPrepareThreshold = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField133(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionTopnMaxPartitions = v + + } + return offset, nil +} + +func (p *TQueryOptions) FastReadField134(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.PartitionTopnPrePartitionRows = v + + } + return offset, nil +} + func (p *TQueryOptions) FastReadField1000(buf []byte) (int, 
error) { offset := 0 @@ -4731,6 +4815,9 @@ func (p *TQueryOptions) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW offset += p.fastWriteField129(buf[offset:], binaryWriter) offset += p.fastWriteField130(buf[offset:], binaryWriter) offset += p.fastWriteField131(buf[offset:], binaryWriter) + offset += p.fastWriteField132(buf[offset:], binaryWriter) + offset += p.fastWriteField133(buf[offset:], binaryWriter) + offset += p.fastWriteField134(buf[offset:], binaryWriter) offset += p.fastWriteField1000(buf[offset:], binaryWriter) offset += p.fastWriteField18(buf[offset:], binaryWriter) offset += p.fastWriteField42(buf[offset:], binaryWriter) @@ -4869,6 +4956,9 @@ func (p *TQueryOptions) BLength() int { l += p.field129Length() l += p.field130Length() l += p.field131Length() + l += p.field132Length() + l += p.field133Length() + l += p.field134Length() l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() @@ -6217,6 +6307,39 @@ func (p *TQueryOptions) fastWriteField131(buf []byte, binaryWriter bthrift.Binar return offset } +func (p *TQueryOptions) fastWriteField132(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetParallelPrepareThreshold() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "parallel_prepare_threshold", thrift.I32, 132) + offset += bthrift.Binary.WriteI32(buf[offset:], p.ParallelPrepareThreshold) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField133(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetPartitionTopnMaxPartitions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_topn_max_partitions", thrift.I32, 133) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionTopnMaxPartitions) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TQueryOptions) fastWriteField134(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetPartitionTopnPrePartitionRows() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_topn_pre_partition_rows", thrift.I32, 134) + offset += bthrift.Binary.WriteI32(buf[offset:], p.PartitionTopnPrePartitionRows) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TQueryOptions) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetDisableFileCache() { @@ -7569,6 +7692,39 @@ func (p *TQueryOptions) field131Length() int { return l } +func (p *TQueryOptions) field132Length() int { + l := 0 + if p.IsSetParallelPrepareThreshold() { + l += bthrift.Binary.FieldBeginLength("parallel_prepare_threshold", thrift.I32, 132) + l += bthrift.Binary.I32Length(p.ParallelPrepareThreshold) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field133Length() int { + l := 0 + if p.IsSetPartitionTopnMaxPartitions() { + l += bthrift.Binary.FieldBeginLength("partition_topn_max_partitions", thrift.I32, 133) + l += bthrift.Binary.I32Length(p.PartitionTopnMaxPartitions) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TQueryOptions) field134Length() int { + l := 0 + if p.IsSetPartitionTopnPrePartitionRows() { + l += bthrift.Binary.FieldBeginLength("partition_topn_pre_partition_rows", thrift.I32, 134) + l += bthrift.Binary.I32Length(p.PartitionTopnPrePartitionRows) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TQueryOptions) field1000Length() int { l := 0 if p.IsSetDisableFileCache() { @@ -19710,6 +19866,20 @@ func (p *TPipelineFragmentParams) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 44: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField44(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 1000: if fieldTypeId == thrift.BOOL 
{ l, err = p.FastReadField1000(buf[offset:]) @@ -20513,6 +20683,36 @@ func (p *TPipelineFragmentParams) FastReadField43(buf []byte) (int, error) { return offset, nil } +func (p *TPipelineFragmentParams) FastReadField44(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + func (p *TPipelineFragmentParams) FastReadField1000(buf []byte) (int, error) { offset := 0 @@ -20578,6 +20778,7 @@ func (p *TPipelineFragmentParams) FastWriteNocopy(buf []byte, binaryWriter bthri offset += p.fastWriteField36(buf[offset:], binaryWriter) offset += p.fastWriteField39(buf[offset:], binaryWriter) offset += p.fastWriteField43(buf[offset:], binaryWriter) + offset += p.fastWriteField44(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -20630,6 +20831,7 @@ func (p *TPipelineFragmentParams) BLength() int { l += p.field41Length() l += p.field42Length() l += p.field43Length() + l += p.field44Length() l += p.field1000Length() } l += bthrift.Binary.FieldStopLength() @@ -21160,6 +21362,25 @@ func (p *TPipelineFragmentParams) fastWriteField43(buf []byte, binaryWriter bthr return offset } +func (p *TPipelineFragmentParams) fastWriteField44(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 44) + 
listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPipelineFragmentParams) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int { offset := 0 if p.IsSetIsMowTable() { @@ -21642,6 +21863,19 @@ func (p *TPipelineFragmentParams) field43Length() int { return l } +func (p *TPipelineFragmentParams) field44Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 44) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *TPipelineFragmentParams) field1000Length() int { l := 0 if p.IsSetIsMowTable() { @@ -21689,65 +21923,444 @@ func (p *TPipelineFragmentParamsList) FastRead(buf []byte) (int, error) { goto SkipFieldError } } - default: - l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) - offset += l - if err != nil { - goto SkipFieldError + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } } - } - - l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadFieldEndError - } - } - l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) - offset += l - if err != nil { - goto ReadStructEndError - } - - return 
offset, nil -ReadStructBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) -ReadFieldBeginError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) -ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) -SkipFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) -ReadFieldEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) -ReadStructEndError: - return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) -} - -func (p *TPipelineFragmentParamsList) FastReadField1(buf []byte) (int, error) { - offset := 0 - - _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) - offset += l - if err != nil { - return offset, err - } - p.ParamsList = make([]*TPipelineFragmentParams, 0, size) - for i := 0; i < size; i++ { - _elem := NewTPipelineFragmentParams() - if l, err := _elem.FastRead(buf[offset:]); err != nil { - return offset, err - } else { - offset += l - } - - p.ParamsList = append(p.ParamsList, _elem) - } - if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { - return offset, err - } else { + case 3: + if fieldTypeId == thrift.MAP { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = 
p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField11(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 12: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField12(buf[offset:]) + offset += l + if err != nil { 
+ goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 13: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField13(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + + l, err = bthrift.Binary.ReadFieldEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldEndError + } + } + l, err = bthrift.Binary.ReadStructEnd(buf[offset:]) + offset += l + if err != nil { + goto ReadStructEndError + } + + return offset, nil +ReadStructBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPipelineFragmentParamsList[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +ReadFieldEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TPipelineFragmentParamsList) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.ParamsList = make([]*TPipelineFragmentParams, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineFragmentParams() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return 
offset, err + } else { + offset += l + } + + p.ParamsList = append(p.ParamsList, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField2(buf []byte) (int, error) { + offset := 0 + + tmp := descriptors.NewTDescriptorTable() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.DescTbl = tmp + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.FileScanParams = make(map[types.TPlanNodeId]*plannodes.TFileScanRangeParams, size) + for i := 0; i < size; i++ { + var _key types.TPlanNodeId + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _key = v + + } + _val := plannodes.NewTFileScanRangeParams() + if l, err := _val.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.FileScanParams[_key] = _val + } + if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField4(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Coord = tmp + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField5(buf []byte) (int, error) { + offset := 0 + + tmp := NewTQueryGlobals() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryGlobals = tmp + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField6(buf []byte) (int, 
error) { + offset := 0 + + tmp := types.NewTResourceInfo() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.ResourceInfo = tmp + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField7(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.FragmentNumOnHost = &v + + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField8(buf []byte) (int, error) { + offset := 0 + + tmp := NewTQueryOptions() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { offset += l } + p.QueryOptions = tmp + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField9(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + p.IsNereids = v + + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField10(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.WorkloadGroups = make([]*TPipelineWorkloadGroup, 0, size) + for i := 0; i < size; i++ { + _elem := NewTPipelineWorkloadGroup() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + p.WorkloadGroups = append(p.WorkloadGroups, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField11(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTUniqueId() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.QueryId = tmp + return offset, nil +} + +func (p 
*TPipelineFragmentParamsList) FastReadField12(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + p.TopnFilterSourceNodeIds = make([]int32, 0, size) + for i := 0; i < size; i++ { + var _elem int32 + if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + + _elem = v + + } + + p.TopnFilterSourceNodeIds = append(p.TopnFilterSourceNodeIds, _elem) + } + if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + return offset, nil +} + +func (p *TPipelineFragmentParamsList) FastReadField13(buf []byte) (int, error) { + offset := 0 + + tmp := types.NewTNetworkAddress() + if l, err := tmp.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.RuntimeFilterMergeAddr = tmp return offset, nil } @@ -21760,7 +22373,19 @@ func (p *TPipelineFragmentParamsList) FastWriteNocopy(buf []byte, binaryWriter b offset := 0 offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPipelineFragmentParamsList") if p != nil { + offset += p.fastWriteField7(buf[offset:], binaryWriter) + offset += p.fastWriteField9(buf[offset:], binaryWriter) offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) + offset += p.fastWriteField4(buf[offset:], binaryWriter) + offset += p.fastWriteField5(buf[offset:], binaryWriter) + offset += p.fastWriteField6(buf[offset:], binaryWriter) + offset += p.fastWriteField8(buf[offset:], binaryWriter) + offset += p.fastWriteField10(buf[offset:], binaryWriter) + offset += p.fastWriteField11(buf[offset:], binaryWriter) + offset += p.fastWriteField12(buf[offset:], binaryWriter) + offset += p.fastWriteField13(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) 
offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -21772,6 +22397,18 @@ func (p *TPipelineFragmentParamsList) BLength() int { l += bthrift.Binary.StructBeginLength("TPipelineFragmentParamsList") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field11Length() + l += p.field12Length() + l += p.field13Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -21796,6 +22433,156 @@ func (p *TPipelineFragmentParamsList) fastWriteField1(buf []byte, binaryWriter b return offset } +func (p *TPipelineFragmentParamsList) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetDescTbl() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "desc_tbl", thrift.STRUCT, 2) + offset += p.DescTbl.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFileScanParams() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_scan_params", thrift.MAP, 3) + mapBeginOffset := offset + offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, 0) + var length int + for k, v := range p.FileScanParams { + length++ + + offset += bthrift.Binary.WriteI32(buf[offset:], k) + + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.STRUCT, length) + offset += bthrift.Binary.WriteMapEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if 
p.IsSetCoord() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "coord", thrift.STRUCT, 4) + offset += p.Coord.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryGlobals() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_globals", thrift.STRUCT, 5) + offset += p.QueryGlobals.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetResourceInfo() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "resource_info", thrift.STRUCT, 6) + offset += p.ResourceInfo.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetFragmentNumOnHost() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "fragment_num_on_host", thrift.I32, 7) + offset += bthrift.Binary.WriteI32(buf[offset:], *p.FragmentNumOnHost) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryOptions() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_options", thrift.STRUCT, 8) + offset += p.QueryOptions.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetIsNereids() { + offset += 
bthrift.Binary.WriteFieldBegin(buf[offset:], "is_nereids", thrift.BOOL, 9) + offset += bthrift.Binary.WriteBool(buf[offset:], p.IsNereids) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetWorkloadGroups() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_groups", thrift.LIST, 10) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0) + var length int + for _, v := range p.WorkloadGroups { + length++ + offset += v.FastWriteNocopy(buf[offset:], binaryWriter) + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetQueryId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_id", thrift.STRUCT, 11) + offset += p.QueryId.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TPipelineFragmentParamsList) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTopnFilterSourceNodeIds() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topn_filter_source_node_ids", thrift.LIST, 12) + listBeginOffset := offset + offset += bthrift.Binary.ListBeginLength(thrift.I32, 0) + var length int + for _, v := range p.TopnFilterSourceNodeIds { + length++ + offset += bthrift.Binary.WriteI32(buf[offset:], v) + + } + bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length) + offset += bthrift.Binary.WriteListEnd(buf[offset:]) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p 
*TPipelineFragmentParamsList) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetRuntimeFilterMergeAddr() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "runtime_filter_merge_addr", thrift.STRUCT, 13) + offset += p.RuntimeFilterMergeAddr.FastWriteNocopy(buf[offset:], binaryWriter) + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TPipelineFragmentParamsList) field1Length() int { l := 0 if p.IsSetParamsList() { @@ -21809,3 +22596,139 @@ func (p *TPipelineFragmentParamsList) field1Length() int { } return l } + +func (p *TPipelineFragmentParamsList) field2Length() int { + l := 0 + if p.IsSetDescTbl() { + l += bthrift.Binary.FieldBeginLength("desc_tbl", thrift.STRUCT, 2) + l += p.DescTbl.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field3Length() int { + l := 0 + if p.IsSetFileScanParams() { + l += bthrift.Binary.FieldBeginLength("file_scan_params", thrift.MAP, 3) + l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.STRUCT, len(p.FileScanParams)) + for k, v := range p.FileScanParams { + + l += bthrift.Binary.I32Length(k) + + l += v.BLength() + } + l += bthrift.Binary.MapEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field4Length() int { + l := 0 + if p.IsSetCoord() { + l += bthrift.Binary.FieldBeginLength("coord", thrift.STRUCT, 4) + l += p.Coord.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field5Length() int { + l := 0 + if p.IsSetQueryGlobals() { + l += bthrift.Binary.FieldBeginLength("query_globals", thrift.STRUCT, 5) + l += p.QueryGlobals.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field6Length() int { + l := 0 + if p.IsSetResourceInfo() { + l += bthrift.Binary.FieldBeginLength("resource_info", thrift.STRUCT, 6) + l += 
p.ResourceInfo.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field7Length() int { + l := 0 + if p.IsSetFragmentNumOnHost() { + l += bthrift.Binary.FieldBeginLength("fragment_num_on_host", thrift.I32, 7) + l += bthrift.Binary.I32Length(*p.FragmentNumOnHost) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field8Length() int { + l := 0 + if p.IsSetQueryOptions() { + l += bthrift.Binary.FieldBeginLength("query_options", thrift.STRUCT, 8) + l += p.QueryOptions.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field9Length() int { + l := 0 + if p.IsSetIsNereids() { + l += bthrift.Binary.FieldBeginLength("is_nereids", thrift.BOOL, 9) + l += bthrift.Binary.BoolLength(p.IsNereids) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field10Length() int { + l := 0 + if p.IsSetWorkloadGroups() { + l += bthrift.Binary.FieldBeginLength("workload_groups", thrift.LIST, 10) + l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.WorkloadGroups)) + for _, v := range p.WorkloadGroups { + l += v.BLength() + } + l += bthrift.Binary.ListEndLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field11Length() int { + l := 0 + if p.IsSetQueryId() { + l += bthrift.Binary.FieldBeginLength("query_id", thrift.STRUCT, 11) + l += p.QueryId.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field12Length() int { + l := 0 + if p.IsSetTopnFilterSourceNodeIds() { + l += bthrift.Binary.FieldBeginLength("topn_filter_source_node_ids", thrift.LIST, 12) + l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.TopnFilterSourceNodeIds)) + var tmpV int32 + l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.TopnFilterSourceNodeIds) + l += bthrift.Binary.ListEndLength() + l += 
bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TPipelineFragmentParamsList) field13Length() int { + l := 0 + if p.IsSetRuntimeFilterMergeAddr() { + l += bthrift.Binary.FieldBeginLength("runtime_filter_merge_addr", thrift.STRUCT, 13) + l += p.RuntimeFilterMergeAddr.BLength() + l += bthrift.Binary.FieldEndLength() + } + return l +} diff --git a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go index e61913a6..d9924303 100644 --- a/pkg/rpc/kitex_gen/plannodes/PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/PlanNodes.go @@ -12240,7 +12240,9 @@ func (p *TTrinoConnectorFileDesc) Field11DeepEqual(src *string) bool { } type TMaxComputeFileDesc struct { - PartitionSpec *string `thrift:"partition_spec,1,optional" frugal:"1,optional,string" json:"partition_spec,omitempty"` + PartitionSpec *string `thrift:"partition_spec,1,optional" frugal:"1,optional,string" json:"partition_spec,omitempty"` + SessionId *string `thrift:"session_id,2,optional" frugal:"2,optional,string" json:"session_id,omitempty"` + TableBatchReadSession *string `thrift:"table_batch_read_session,3,optional" frugal:"3,optional,string" json:"table_batch_read_session,omitempty"` } func NewTMaxComputeFileDesc() *TMaxComputeFileDesc { @@ -12258,18 +12260,52 @@ func (p *TMaxComputeFileDesc) GetPartitionSpec() (v string) { } return *p.PartitionSpec } + +var TMaxComputeFileDesc_SessionId_DEFAULT string + +func (p *TMaxComputeFileDesc) GetSessionId() (v string) { + if !p.IsSetSessionId() { + return TMaxComputeFileDesc_SessionId_DEFAULT + } + return *p.SessionId +} + +var TMaxComputeFileDesc_TableBatchReadSession_DEFAULT string + +func (p *TMaxComputeFileDesc) GetTableBatchReadSession() (v string) { + if !p.IsSetTableBatchReadSession() { + return TMaxComputeFileDesc_TableBatchReadSession_DEFAULT + } + return *p.TableBatchReadSession +} func (p *TMaxComputeFileDesc) SetPartitionSpec(val *string) { p.PartitionSpec = val } +func (p *TMaxComputeFileDesc) SetSessionId(val 
*string) { + p.SessionId = val +} +func (p *TMaxComputeFileDesc) SetTableBatchReadSession(val *string) { + p.TableBatchReadSession = val +} var fieldIDToName_TMaxComputeFileDesc = map[int16]string{ 1: "partition_spec", + 2: "session_id", + 3: "table_batch_read_session", } func (p *TMaxComputeFileDesc) IsSetPartitionSpec() bool { return p.PartitionSpec != nil } +func (p *TMaxComputeFileDesc) IsSetSessionId() bool { + return p.SessionId != nil +} + +func (p *TMaxComputeFileDesc) IsSetTableBatchReadSession() bool { + return p.TableBatchReadSession != nil +} + func (p *TMaxComputeFileDesc) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType @@ -12297,6 +12333,22 @@ func (p *TMaxComputeFileDesc) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } default: if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError @@ -12337,6 +12389,28 @@ func (p *TMaxComputeFileDesc) ReadField1(iprot thrift.TProtocol) error { p.PartitionSpec = _field return nil } +func (p *TMaxComputeFileDesc) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SessionId = _field + return nil +} +func (p *TMaxComputeFileDesc) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TableBatchReadSession = _field + return nil +} func (p *TMaxComputeFileDesc) Write(oprot thrift.TProtocol) (err error) { var fieldId 
int16 @@ -12348,6 +12422,14 @@ func (p *TMaxComputeFileDesc) Write(oprot thrift.TProtocol) (err error) { fieldId = 1 goto WriteFieldError } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } } if err = oprot.WriteFieldStop(); err != nil { goto WriteFieldStopError @@ -12385,6 +12467,44 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } +func (p *TMaxComputeFileDesc) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSessionId() { + if err = oprot.WriteFieldBegin("session_id", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SessionId); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TMaxComputeFileDesc) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTableBatchReadSession() { + if err = oprot.WriteFieldBegin("table_batch_read_session", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TableBatchReadSession); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + func (p *TMaxComputeFileDesc) String() string { if p == nil { return "" @@ -12402,6 +12522,12 @@ func (p *TMaxComputeFileDesc) DeepEqual(ano *TMaxComputeFileDesc) bool { if !p.Field1DeepEqual(ano.PartitionSpec) { return false } + if 
!p.Field2DeepEqual(ano.SessionId) { + return false + } + if !p.Field3DeepEqual(ano.TableBatchReadSession) { + return false + } return true } @@ -12417,6 +12543,30 @@ func (p *TMaxComputeFileDesc) Field1DeepEqual(src *string) bool { } return true } +func (p *TMaxComputeFileDesc) Field2DeepEqual(src *string) bool { + + if p.SessionId == src { + return true + } else if p.SessionId == nil || src == nil { + return false + } + if strings.Compare(*p.SessionId, *src) != 0 { + return false + } + return true +} +func (p *TMaxComputeFileDesc) Field3DeepEqual(src *string) bool { + + if p.TableBatchReadSession == src { + return true + } else if p.TableBatchReadSession == nil || src == nil { + return false + } + if strings.Compare(*p.TableBatchReadSession, *src) != 0 { + return false + } + return true +} type THudiFileDesc struct { InstantTime *string `thrift:"instant_time,1,optional" frugal:"1,optional,string" json:"instant_time,omitempty"` diff --git a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go index fcc36a1d..91423c05 100644 --- a/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go +++ b/pkg/rpc/kitex_gen/plannodes/k-PlanNodes.go @@ -8198,6 +8198,34 @@ func (p *TMaxComputeFileDesc) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } default: l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId) offset += l @@ -8246,6 +8274,32 @@ func (p *TMaxComputeFileDesc) FastReadField1(buf []byte) 
(int, error) { return offset, nil } +func (p *TMaxComputeFileDesc) FastReadField2(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.SessionId = &v + + } + return offset, nil +} + +func (p *TMaxComputeFileDesc) FastReadField3(buf []byte) (int, error) { + offset := 0 + + if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + p.TableBatchReadSession = &v + + } + return offset, nil +} + // for compatibility func (p *TMaxComputeFileDesc) FastWrite(buf []byte) int { return 0 @@ -8256,6 +8310,8 @@ func (p *TMaxComputeFileDesc) FastWriteNocopy(buf []byte, binaryWriter bthrift.B offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMaxComputeFileDesc") if p != nil { offset += p.fastWriteField1(buf[offset:], binaryWriter) + offset += p.fastWriteField2(buf[offset:], binaryWriter) + offset += p.fastWriteField3(buf[offset:], binaryWriter) } offset += bthrift.Binary.WriteFieldStop(buf[offset:]) offset += bthrift.Binary.WriteStructEnd(buf[offset:]) @@ -8267,6 +8323,8 @@ func (p *TMaxComputeFileDesc) BLength() int { l += bthrift.Binary.StructBeginLength("TMaxComputeFileDesc") if p != nil { l += p.field1Length() + l += p.field2Length() + l += p.field3Length() } l += bthrift.Binary.FieldStopLength() l += bthrift.Binary.StructEndLength() @@ -8284,6 +8342,28 @@ func (p *TMaxComputeFileDesc) fastWriteField1(buf []byte, binaryWriter bthrift.B return offset } +func (p *TMaxComputeFileDesc) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetSessionId() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "session_id", thrift.STRING, 2) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.SessionId) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + +func (p *TMaxComputeFileDesc) fastWriteField3(buf []byte, 
binaryWriter bthrift.BinaryWriter) int { + offset := 0 + if p.IsSetTableBatchReadSession() { + offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_batch_read_session", thrift.STRING, 3) + offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.TableBatchReadSession) + + offset += bthrift.Binary.WriteFieldEnd(buf[offset:]) + } + return offset +} + func (p *TMaxComputeFileDesc) field1Length() int { l := 0 if p.IsSetPartitionSpec() { @@ -8295,6 +8375,28 @@ func (p *TMaxComputeFileDesc) field1Length() int { return l } +func (p *TMaxComputeFileDesc) field2Length() int { + l := 0 + if p.IsSetSessionId() { + l += bthrift.Binary.FieldBeginLength("session_id", thrift.STRING, 2) + l += bthrift.Binary.StringLengthNocopy(*p.SessionId) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + +func (p *TMaxComputeFileDesc) field3Length() int { + l := 0 + if p.IsSetTableBatchReadSession() { + l += bthrift.Binary.FieldBeginLength("table_batch_read_session", thrift.STRING, 3) + l += bthrift.Binary.StringLengthNocopy(*p.TableBatchReadSession) + + l += bthrift.Binary.FieldEndLength() + } + return l +} + func (p *THudiFileDesc) FastRead(buf []byte) (int, error) { var err error var offset int diff --git a/pkg/rpc/thrift/DataSinks.thrift b/pkg/rpc/thrift/DataSinks.thrift index e46f7e60..ed7ccee6 100644 --- a/pkg/rpc/thrift/DataSinks.thrift +++ b/pkg/rpc/thrift/DataSinks.thrift @@ -188,6 +188,7 @@ struct TDataStreamSink { 10: optional Descriptors.TOlapTableLocationParam tablet_sink_location 11: optional i64 tablet_sink_txn_id 12: optional Types.TTupleId tablet_sink_tuple_id + 13: optional list tablet_sink_exprs } struct TMultiCastDataStreamSink { diff --git a/pkg/rpc/thrift/Descriptors.thrift b/pkg/rpc/thrift/Descriptors.thrift index 56222c23..10ad6de3 100644 --- a/pkg/rpc/thrift/Descriptors.thrift +++ b/pkg/rpc/thrift/Descriptors.thrift @@ -353,14 +353,16 @@ struct TJdbcTable { } struct TMCTable { - 1: optional string region + 1: optional string region // 
deprecated 2: optional string project 3: optional string table 4: optional string access_key 5: optional string secret_key - 6: optional string public_access - 7: optional string odps_url - 8: optional string tunnel_url + 6: optional string public_access // deprecated + 7: optional string odps_url // deprecated + 8: optional string tunnel_url // deprecated + 9: optional string endpoint + 10: optional string quota } struct TTrinoConnectorTable { diff --git a/pkg/rpc/thrift/FrontendService.thrift b/pkg/rpc/thrift/FrontendService.thrift index 3190d331..436129dd 100644 --- a/pkg/rpc/thrift/FrontendService.thrift +++ b/pkg/rpc/thrift/FrontendService.thrift @@ -1492,6 +1492,7 @@ struct TGetMetaDBMeta { 3: optional list tables 4: optional list dropped_partitions 5: optional list dropped_tables + 6: optional list dropped_indexes } struct TGetMetaResult { diff --git a/pkg/rpc/thrift/HeartbeatService.thrift b/pkg/rpc/thrift/HeartbeatService.thrift index 4daea779..c03f04a6 100644 --- a/pkg/rpc/thrift/HeartbeatService.thrift +++ b/pkg/rpc/thrift/HeartbeatService.thrift @@ -39,6 +39,8 @@ struct TMasterInfo { 7: optional i64 heartbeat_flags 8: optional i64 backend_id 9: optional list frontend_infos + 10: optional string meta_service_endpoint; + 11: optional string cloud_unique_id; } struct TBackendInfo { diff --git a/pkg/rpc/thrift/PaloInternalService.thrift b/pkg/rpc/thrift/PaloInternalService.thrift index 7875aa2b..48f41e8e 100644 --- a/pkg/rpc/thrift/PaloInternalService.thrift +++ b/pkg/rpc/thrift/PaloInternalService.thrift @@ -342,6 +342,9 @@ struct TQueryOptions { 130: optional bool enable_adaptive_pipeline_task_serial_read_on_limit = true; 131: optional i32 adaptive_pipeline_task_serial_read_on_limit = 10000; + 132: optional i32 parallel_prepare_threshold = 0; + 133: optional i32 partition_topn_max_partitions = 1024; + 134: optional i32 partition_topn_pre_partition_rows = 1000; // For cloud, to control if the content would be written into file cache // In write path, to 
control if the content would be written into file cache. // In read path, read from file cache or remote storage when execute query. @@ -811,11 +814,27 @@ struct TPipelineFragmentParams { 41: optional i64 wal_id 42: optional i64 content_length 43: optional Types.TNetworkAddress current_connect_fe + // Used by 2.1 + 44: optional list topn_filter_source_node_ids // For cloud 1000: optional bool is_mow_table; } struct TPipelineFragmentParamsList { - 1: optional list params_list; + 1: optional list params_list; + 2: optional Descriptors.TDescriptorTable desc_tbl; + // scan node id -> scan range params, only for external file scan + 3: optional map file_scan_params; + 4: optional Types.TNetworkAddress coord; + 5: optional TQueryGlobals query_globals; + 6: optional Types.TResourceInfo resource_info; + // The total number of fragments on same BE host + 7: optional i32 fragment_num_on_host + 8: optional TQueryOptions query_options + 9: optional bool is_nereids = true; + 10: optional list workload_groups + 11: optional Types.TUniqueId query_id + 12: optional list topn_filter_source_node_ids + 13: optional Types.TNetworkAddress runtime_filter_merge_addr } diff --git a/pkg/rpc/thrift/PlanNodes.thrift b/pkg/rpc/thrift/PlanNodes.thrift index e53289c1..c77ab48b 100644 --- a/pkg/rpc/thrift/PlanNodes.thrift +++ b/pkg/rpc/thrift/PlanNodes.thrift @@ -347,7 +347,10 @@ struct TTrinoConnectorFileDesc { } struct TMaxComputeFileDesc { - 1: optional string partition_spec + 1: optional string partition_spec // deprecated + 2: optional string session_id + 3: optional string table_batch_read_session + } struct THudiFileDesc { diff --git a/regression-test/common/helper.groovy b/regression-test/common/helper.groovy index 6feb7bc3..354b8974 100644 --- a/regression-test/common/helper.groovy +++ b/regression-test/common/helper.groovy @@ -229,7 +229,6 @@ class Helper { check check_func } } - def result = null features_uri.call() { code, body -> if (!"${code}".toString().equals("200")) { @@ -251,6 
+250,34 @@ class Helper { } return false } + + Object get_job_progress(tableName = "") { + def request_body = suite.get_ccr_body(tableName) + def get_job_progress_uri = { check_func -> + suite.httpTest { + uri "/job_progress" + endpoint syncerAddress + body request_body + op "post" + check check_func + } + } + + def result = null + get_job_progress_uri.call() { code, body -> + if (!"${code}".toString().equals("200")) { + throw "request failed, code: ${code}, body: ${body}" + } + def jsonSlurper = new groovy.json.JsonSlurper() + def object = jsonSlurper.parseText "${body}" + if (!object.success) { + throw "request failed, error msg: ${object.error_msg}" + } + suite.logger.info("job progress: ${object.job_progress}") + result = jsonSlurper.parseText object.job_progress + } + return result + } } new Helper(suite) diff --git a/regression-test/suites/table-schema-change/test_filter_dropped_indexes.groovy b/regression-test/suites/table-schema-change/test_filter_dropped_indexes.groovy new file mode 100644 index 00000000..20f2ede8 --- /dev/null +++ b/regression-test/suites/table-schema-change/test_filter_dropped_indexes.groovy @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+suite("test_filter_dropped_indexes") { + def helper = new GroovyShell(new Binding(['suite': delegate])) + .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy")) + + def tableName = "tbl_" + UUID.randomUUID().toString().replace("-", "") + def test_num = 0 + def insert_num = 5 + + def exist = { res -> Boolean + return res.size() != 0 + } + + def has_count = { count -> + return { res -> Boolean + res.size() == count + } + } + + sql "DROP TABLE IF EXISTS ${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${tableName} + ( + `test` INT, + `id` INT, + `value` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "binlog.enable" = "true" + ) + """ + + def values = []; + for (int index = 0; index < insert_num; index++) { + values.add("(${test_num}, ${index}, ${index})") + } + sql """ + INSERT INTO ${tableName} VALUES ${values.join(",")} + """ + sql "sync" + + helper.ccrJobCreate(tableName) + + assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30)) + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num, 60)) + + logger.info("=== pause job, insert data and issue schema change ===") + + helper.ccrJobPause(tableName) + sql "INSERT INTO ${tableName} VALUES (100, 100, 100)" + sql "INSERT INTO ${tableName} VALUES (101, 101, 101)" + sql "INSERT INTO ${tableName} VALUES (102, 102, 102)" + + logger.info("=== add first column ===") + // binlog type: ALTER_JOB, binlog data: + // { + // "type":"SCHEMA_CHANGE", + // "dbId":11049, + // "tableId":11058, + // "tableName":"tbl_add_column6ab3b514b63c4368aa0a0149da0acabd", + // "jobId":11076, + // "jobState":"FINISHED", + // "rawSql":"ALTER TABLE `regression_test_schema_change`.`tbl_add_column6ab3b514b63c4368aa0a0149da0acabd` ADD COLUMN `first` int NULL DEFAULT \"0\" COMMENT \"\" FIRST" + // } + sql """ + ALTER TABLE ${tableName} + ADD COLUMN `first` INT KEY DEFAULT "0" FIRST + """ 
+ sql "sync" + + assertTrue(helper.checkShowTimesOf(""" + SHOW ALTER TABLE COLUMN + FROM ${context.dbName} + WHERE TableName = "${tableName}" AND State = "FINISHED" + """, + has_count(1), 30)) + + def first_job_progress = helper.get_job_progress(tableName) + + logger.info("resume ccr job and wait sync job") + helper.ccrJobResume(tableName) + + def has_column_first = { res -> Boolean + // Field == 'first' && 'Key' == 'YES' + return res[0][0] == 'first' && (res[0][3] == 'YES' || res[0][3] == 'true') + } + + assertTrue(helper.checkShowTimesOf("SHOW COLUMNS FROM `${tableName}`", has_column_first, 60, "target_sql")) + + sql "INSERT INTO ${tableName} VALUES (123, 123, 123, 123)" + + // cache must be clear and reload. + assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", insert_num + 4, 60)) + + // no full sync triggered. + def last_job_progress = helper.get_job_progress(tableName) + assertTrue(last_job_progress.full_sync_start_at == first_job_progress.full_sync_start_at) +} + + +