From 08e52bb5f488a0ec7232405efdd1f3bcc49bcaa8 Mon Sep 17 00:00:00 2001 From: Davies Liu Date: Mon, 9 Dec 2024 14:43:53 +0800 Subject: [PATCH] meta/sql: simplify backup v2 (#5352) Signed-off-by: jiefenghuang Co-authored-by: jiefenghuang --- pkg/meta/backup.go | 404 ++++++------- pkg/meta/base.go | 58 +- pkg/meta/load_dump_test.go | 16 +- pkg/meta/pb/backup.pb.go | 1072 ++++++++++------------------------ pkg/meta/pb/backup.proto | 70 +-- pkg/meta/redis_bak.go | 11 +- pkg/meta/sql.go | 9 +- pkg/meta/sql_bak.go | 1128 ++++++++++++++---------------------- pkg/meta/tkv.go | 5 - pkg/meta/tkv_bak.go | 14 +- 10 files changed, 970 insertions(+), 1817 deletions(-) diff --git a/pkg/meta/backup.go b/pkg/meta/backup.go index 82cf01eec0e4..d03a1c027c00 100644 --- a/pkg/meta/backup.go +++ b/pkg/meta/backup.go @@ -22,8 +22,6 @@ import ( "encoding/json" "fmt" "io" - "reflect" - "sync" "unsafe" "github.com/juicedata/juicefs/pkg/meta/pb" @@ -39,57 +37,38 @@ const ( ) const ( - SegTypeUnknown = iota - SegTypeFormat - SegTypeCounter - SegTypeSustained - SegTypeDelFile - SegTypeAcl - SegTypeXattr - SegTypeQuota - SegTypeStat - SegTypeNode - SegTypeChunk - SegTypeSliceRef - SegTypeEdge - SegTypeParent // for redis/tkv only - SegTypeSymlink - SegTypeMix // for redis/tkv only - SegTypeMax + segTypeUnknown = iota + segTypeFormat + segTypeCounter + segTypeNode + segTypeEdge + segTypeChunk + segTypeSliceRef + segTypeSymlink + segTypeSustained + segTypeDelFile + segTypeXattr + segTypeAcl + segTypeStat + segTypeQuota + segTypeParent // for redis/tkv only + segTypeMix // for redis/tkv only + segTypeMax ) -var ( - SegType2Name map[int]protoreflect.FullName - SegName2Type map[protoreflect.FullName]int -) +var errBakEOF = fmt.Errorf("reach backup EOF") -func init() { - SegType2Name = map[int]protoreflect.FullName{ - SegTypeFormat: proto.MessageName(&pb.Format{}), - SegTypeCounter: proto.MessageName(&pb.Counters{}), - SegTypeSustained: proto.MessageName(&pb.SustainedList{}), - SegTypeDelFile: proto.MessageName(&pb.DelFileList{}), - SegTypeSliceRef: proto.MessageName(&pb.SliceRefList{}), - SegTypeAcl: proto.MessageName(&pb.AclList{}), - SegTypeXattr: proto.MessageName(&pb.XattrList{}), - SegTypeQuota: proto.MessageName(&pb.QuotaList{}), - SegTypeStat: proto.MessageName(&pb.StatList{}), - SegTypeNode: proto.MessageName(&pb.NodeList{}), - SegTypeChunk: proto.MessageName(&pb.ChunkList{}), - SegTypeEdge: proto.MessageName(&pb.EdgeList{}), - SegTypeParent: proto.MessageName(&pb.ParentList{}), - SegTypeSymlink: proto.MessageName(&pb.SymlinkList{}), - } - - SegName2Type = make(map[protoreflect.FullName]int) - for k, v := range SegType2Name { - SegName2Type[v] = k - } - - SegType2Name[SegTypeMix] = "kv.Mix" +func getMessageNameFromType(typ int) protoreflect.FullName { + if typ == segTypeFormat { + return proto.MessageName(&pb.Format{}) + } else if typ < segTypeMax { + return proto.MessageName(&pb.Batch{}) + } else { + return "" + } } -func CreateMessageByName(name protoreflect.FullName) (proto.Message, error) { +func createMessageByName(name protoreflect.FullName) (proto.Message, error) { typ, err := protoregistry.GlobalTypes.FindMessageByName(name) if err != nil { return nil, fmt.Errorf("failed to find message %s's type: %v", name, err) @@ -97,18 +76,16 @@ func CreateMessageByName(name protoreflect.FullName) (proto.Message, error) { return typ.New().Interface(), nil } -var ErrBakEOF = fmt.Errorf("reach backup EOF") - -// BakFormat: BakSegment... 
+ BakEOF + BakFooter -type BakFormat struct { - Offset uint64 - Footer *BakFooter +// bakFormat: BakSegment... + BakEOS + BakFooter +type bakFormat struct { + pos uint64 + footer *bakFooter } -func NewBakFormat() *BakFormat { - return &BakFormat{ - Footer: &BakFooter{ - Msg: &pb.Footer{ +func newBakFormat() *bakFormat { + return &bakFormat{ + footer: &bakFooter{ + msg: &pb.Footer{ Magic: BakMagic, Version: BakVersion, Infos: make(map[string]*pb.Footer_SegInfo), @@ -117,7 +94,7 @@ func NewBakFormat() *BakFormat { } } -func (f *BakFormat) WriteSegment(w io.Writer, seg *BakSegment) error { +func (f *bakFormat) writeSegment(w io.Writer, seg *bakSegment) error { if seg == nil { return nil } @@ -128,32 +105,32 @@ func (f *BakFormat) WriteSegment(w io.Writer, seg *BakSegment) error { } name := seg.String() - info, ok := f.Footer.Msg.Infos[name] + info, ok := f.footer.msg.Infos[name] if !ok { info = &pb.Footer_SegInfo{Offset: []uint64{}, Num: 0} - f.Footer.Msg.Infos[name] = info + f.footer.msg.Infos[name] = info } - info.Offset = append(info.Offset, f.Offset) - info.Num += seg.Num() - f.Offset += uint64(n) + info.Offset = append(info.Offset, f.pos) + info.Num += seg.num() + f.pos += uint64(n) return nil } -func (f *BakFormat) ReadSegment(r io.Reader) (*BakSegment, error) { - seg := &BakSegment{} +func (f *bakFormat) readSegment(r io.Reader) (*bakSegment, error) { + seg := &bakSegment{} if err := seg.Unmarshal(r); err != nil { return nil, err } return seg, nil } -func (f *BakFormat) WriteFooter(w io.Writer) error { +func (f *bakFormat) writeFooter(w io.Writer) error { if err := f.writeEOS(w); err != nil { return err } - data, err := f.Footer.Marshal() + data, err := f.footer.Marshal() if err != nil { return err } @@ -164,43 +141,43 @@ func (f *BakFormat) WriteFooter(w io.Writer) error { return nil } -func (f *BakFormat) writeEOS(w io.Writer) error { +func (f *bakFormat) writeEOS(w io.Writer) error { if n, err := w.Write(binary.BigEndian.AppendUint32(nil, BakEOS)); err != nil && n != 4 { return fmt.Errorf("failed to write EOS: err %w, write len %d, expect len 4", err, n) } return nil } -func (f *BakFormat) ReadFooter(r io.ReadSeeker) (*BakFooter, error) { - footer := &BakFooter{} +func (f *bakFormat) readFooter(r io.ReadSeeker) (*bakFooter, error) { // nolint:unused + footer := &bakFooter{} if err := footer.Unmarshal(r); err != nil { return nil, err } - if footer.Msg.Magic != BakMagic { - return nil, fmt.Errorf("invalid magic number %d, expect %d", footer.Msg.Magic, BakMagic) + if footer.msg.Magic != BakMagic { + return nil, fmt.Errorf("invalid magic number %d, expect %d", footer.msg.Magic, BakMagic) } - f.Footer = footer + f.footer = footer return footer, nil } -type BakFooter struct { - Msg *pb.Footer - Len uint64 +type bakFooter struct { + msg *pb.Footer + len uint64 } -func (h *BakFooter) Marshal() ([]byte, error) { - data, err := proto.Marshal(h.Msg) +func (h *bakFooter) Marshal() ([]byte, error) { + data, err := proto.Marshal(h.msg) if err != nil { return nil, fmt.Errorf("failed to marshal footer: %w", err) } - h.Len = uint64(len(data)) - data = binary.BigEndian.AppendUint64(data, h.Len) + h.len = uint64(len(data)) + data = binary.BigEndian.AppendUint64(data, h.len) return data, nil } -func (h *BakFooter) Unmarshal(r io.ReadSeeker) error { - lenSize := int64(unsafe.Sizeof(h.Len)) +func (h *bakFooter) Unmarshal(r io.ReadSeeker) error { + lenSize := int64(unsafe.Sizeof(h.len)) _, _ = r.Seek(-lenSize, io.SeekEnd) data := make([]byte, lenSize) @@ -208,70 +185,117 @@ func (h *BakFooter) Unmarshal(r 
io.ReadSeeker) error { return fmt.Errorf("failed to read footer length: err %w, read len %d, expect len %d", err, n, lenSize) } - h.Len = binary.BigEndian.Uint64(data) - _, _ = r.Seek(-int64(h.Len)-lenSize, io.SeekEnd) - data = make([]byte, h.Len) - if n, err := r.Read(data); err != nil && n != int(h.Len) { - return fmt.Errorf("failed to read footer: err %w, read len %d, expect len %d", err, n, h.Len) + h.len = binary.BigEndian.Uint64(data) + _, _ = r.Seek(-int64(h.len)-lenSize, io.SeekEnd) + data = make([]byte, h.len) + if n, err := r.Read(data); err != nil && n != int(h.len) { + return fmt.Errorf("failed to read footer: err %w, read len %d, expect len %d", err, n, h.len) } - h.Msg = &pb.Footer{} - if err := proto.Unmarshal(data, h.Msg); err != nil { + h.msg = &pb.Footer{} + if err := proto.Unmarshal(data, h.msg); err != nil { return fmt.Errorf("failed to unmarshal footer: %w", err) } return nil } -type BakSegment struct { - Typ uint32 - Len uint64 - Val proto.Message +type bakSegment struct { + typ uint32 + len uint64 + val proto.Message } -func (s *BakSegment) String() string { - return string(proto.MessageName(s.Val).Name()) +func (s *bakSegment) String() string { + return string(proto.MessageName(s.val).Name()) } -func (s *BakSegment) Num() uint64 { - switch v := s.Val.(type) { - case *pb.Format: +func (s *bakSegment) num() uint64 { + switch s.typ { + case segTypeFormat: return 1 - case *pb.Counters: - return 6 default: - val := reflect.ValueOf(v) - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - field := val.FieldByName("List") - if field.IsValid() && field.Kind() == reflect.Slice { - return uint64(field.Len()) + b := s.val.(*pb.Batch) + switch s.typ { + case segTypeCounter: + return uint64(len(b.Counters)) + case segTypeNode: + return uint64(len(b.Nodes)) + case segTypeEdge: + return uint64(len(b.Edges)) + case segTypeChunk: + return uint64(len(b.Chunks)) + case segTypeSliceRef: + return uint64(len(b.SliceRefs)) + case segTypeSymlink: + return uint64(len(b.Symlinks)) + case segTypeSustained: + return uint64(len(b.Sustained)) + case segTypeDelFile: + return uint64(len(b.Delfiles)) + case segTypeXattr: + return uint64(len(b.Xattrs)) + case segTypeAcl: + return uint64(len(b.Acls)) + case segTypeStat: + return uint64(len(b.Dirstats)) + case segTypeQuota: + return uint64(len(b.Quotas)) + case segTypeParent: + return uint64(len(b.Parents)) } return 0 } } -func (s *BakSegment) Marshal(w io.Writer) (int, error) { - if s == nil || s.Val == nil { +func (s *bakSegment) Marshal(w io.Writer) (int, error) { + if s == nil || s.val == nil { return 0, fmt.Errorf("segment %s is nil", s) } - typ, ok := SegName2Type[proto.MessageName(s.Val)] - if !ok { - return 0, fmt.Errorf("segment type %d is unknown", typ) + switch v := s.val.(type) { + case *pb.Format: + s.typ = uint32(segTypeFormat) + case *pb.Batch: + if v.Counters != nil { + s.typ = uint32(segTypeCounter) + } else if v.Sustained != nil { + s.typ = uint32(segTypeSustained) + } else if v.Delfiles != nil { + s.typ = uint32(segTypeDelFile) + } else if v.Acls != nil { + s.typ = uint32(segTypeAcl) + } else if v.Xattrs != nil { + s.typ = uint32(segTypeXattr) + } else if v.Quotas != nil { + s.typ = uint32(segTypeQuota) + } else if v.Dirstats != nil { + s.typ = uint32(segTypeStat) + } else if v.Nodes != nil { + s.typ = uint32(segTypeNode) + } else if v.Chunks != nil { + s.typ = uint32(segTypeChunk) + } else if v.SliceRefs != nil { + s.typ = uint32(segTypeSliceRef) + } else if v.Edges != nil { + s.typ = uint32(segTypeEdge) + } else if v.Symlinks 
!= nil { + s.typ = uint32(segTypeSymlink) + } else if v.Parents != nil { + s.typ = uint32(segTypeParent) + } else { + return 0, fmt.Errorf("unknown batch type %s", s) + } } - s.Typ = uint32(typ) - if err := binary.Write(w, binary.BigEndian, s.Typ); err != nil { + if err := binary.Write(w, binary.BigEndian, s.typ); err != nil { return 0, fmt.Errorf("failed to write segment type %s : %w", s, err) } - - data, err := proto.Marshal(s.Val) + data, err := proto.Marshal(s.val) if err != nil { return 0, fmt.Errorf("failed to marshal segment message %s : %w", s, err) } - s.Len = uint64(len(data)) - if err := binary.Write(w, binary.BigEndian, s.Len); err != nil { + s.len = uint64(len(data)) + if err := binary.Write(w, binary.BigEndian, s.len); err != nil { return 0, fmt.Errorf("failed to write segment length %s: %w", s, err) } @@ -279,162 +303,94 @@ func (s *BakSegment) Marshal(w io.Writer) (int, error) { return 0, fmt.Errorf("failed to write segment data %s: err %w, write len %d, expect len %d", s, err, n, len(data)) } - return binary.Size(s.Typ) + binary.Size(s.Len) + len(data), nil + return binary.Size(s.typ) + binary.Size(s.len) + len(data), nil } -func (s *BakSegment) Unmarshal(r io.Reader) error { - if err := binary.Read(r, binary.BigEndian, &s.Typ); err != nil { +func (s *bakSegment) Unmarshal(r io.Reader) error { + if err := binary.Read(r, binary.BigEndian, &s.typ); err != nil { return fmt.Errorf("failed to read segment type: %v", err) } - name, ok := SegType2Name[int(s.Typ)] - if !ok { - if s.Typ == BakMagic { - return ErrBakEOF - } - return fmt.Errorf("segment type %d is unknown", s.Typ) + if s.typ == BakMagic { + return errBakEOF + } + name := getMessageNameFromType(int(s.typ)) + if name == "" { + return fmt.Errorf("segment type %d is unknown", s.typ) } - if err := binary.Read(r, binary.BigEndian, &s.Len); err != nil { + if err := binary.Read(r, binary.BigEndian, &s.len); err != nil { return fmt.Errorf("failed to read segment %s length: %v", s, err) } - - data := make([]byte, s.Len) + data := make([]byte, s.len) n, err := r.Read(data) - if err != nil && n != int(s.Len) { - return fmt.Errorf("failed to read segment value: err %v, read len %d, expect len %d", err, n, s.Len) + if err != nil && n != int(s.len) { + return fmt.Errorf("failed to read segment value: err %v, read len %d, expect len %d", err, n, s.len) } - msg, err := CreateMessageByName(name) + msg, err := createMessageByName(name) if err != nil { return fmt.Errorf("failed to create message %s: %v", name, err) } if err = proto.Unmarshal(data, msg); err != nil { return fmt.Errorf("failed to unmarshal segment msg %s: %v", name, err) } - s.Val = msg + s.val = msg return nil } -// Dump Segment - type DumpOption struct { KeepSecret bool - CoNum int + Threads int } func (opt *DumpOption) check() *DumpOption { if opt == nil { opt = &DumpOption{} } - if opt.CoNum < 1 { - opt.CoNum = 10 + if opt.Threads < 1 { + opt.Threads = 10 } return opt } -type segReleaser interface { - release(msg proto.Message) -} - -type iDumpedSeg interface { - String() string - dump(ctx Context, ch chan *dumpedResult) error - segReleaser -} - -type dumpedSeg struct { - iDumpedSeg - typ int - meta Meta - opt *DumpOption - txn *eTxn -} - -func (s *dumpedSeg) String() string { return string(SegType2Name[s.typ]) } -func (s *dumpedSeg) release(msg proto.Message) {} - -type formatDS struct { - dumpedSeg -} - -func (s *formatDS) dump(ctx Context, ch chan *dumpedResult) error { - f := s.meta.GetFormat() - return dumpResult(ctx, ch, &dumpedResult{s, ConvertFormatToPB(&f, 
s.opt.KeepSecret)}) -} - -type dumpedBatchSeg struct { - dumpedSeg - pools []*sync.Pool +func (m *baseMeta) dumpFormat(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + f := m.GetFormat() + if !opt.KeepSecret { + f.RemoveSecret() + } + data, err := json.MarshalIndent(f, "", "") + if err != nil { + logger.Errorf("failed to marshal format %s: %v", f.Name, err) + return nil + } + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Format{Data: data}}) } type dumpedResult struct { - seg segReleaser - msg proto.Message + msg proto.Message + release func(m proto.Message) } -func dumpResult(ctx context.Context, ch chan *dumpedResult, res *dumpedResult) error { +func dumpResult(ctx context.Context, ch chan<- *dumpedResult, res *dumpedResult) error { select { case <-ctx.Done(): return ctx.Err() case ch <- res: + return nil } - return nil } -// Load Segment... - type LoadOption struct { - CoNum int + Threads int } func (opt *LoadOption) check() { - if opt.CoNum < 1 { - opt.CoNum = 10 - } -} - -type iLoadedSeg interface { - String() string - load(ctx Context, msg proto.Message) error -} - -type loadedSeg struct { - iLoadedSeg - typ int - meta Meta -} - -func (s *loadedSeg) String() string { return string(SegType2Name[s.typ]) } - -// Message Marshal/Unmarshal - -func ConvertFormatToPB(f *Format, keepSecret bool) *pb.Format { - if !keepSecret { - f.RemoveSecret() - } - data, err := json.MarshalIndent(f, "", "") - if err != nil { - logger.Errorf("failed to marshal format %s: %v", f.Name, err) - return nil - } - return &pb.Format{ - Data: data, + if opt.Threads < 1 { + opt.Threads = 10 } } // transaction +type txSessionKey struct{} type txMaxRetryKey struct{} - -type bTxnOption struct { - coNum int - notUsed bool - maxRetry int - maxStmtRetry int -} - -type eTxn struct { - en engine - opt *bTxnOption - obj interface{} // real transaction object for different engine -} diff --git a/pkg/meta/base.go b/pkg/meta/base.go index f492a7d5c2ae..5fb9c009a1db 100644 --- a/pkg/meta/base.go +++ b/pkg/meta/base.go @@ -136,9 +136,8 @@ type engine interface { newDirHandler(inode Ino, plus bool, entries []*Entry) DirHandler - execETxn(ctx Context, txn *eTxn, fn func(ctx Context, txn *eTxn) error) error - buildDumpedSeg(typ int, opt *DumpOption, txn *eTxn) iDumpedSeg - buildLoadedSeg(typ int, opt *LoadOption) iLoadedSeg + dump(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error + load(ctx Context, typ int, opt *LoadOption, val proto.Message) error prepareLoad(ctx Context, opt *LoadOption) error } @@ -3086,33 +3085,15 @@ func (h *dirHandler) Close() { func (m *baseMeta) DumpMetaV2(ctx Context, w io.Writer, opt *DumpOption) error { opt = opt.check() - bak := NewBakFormat() + bak := newBakFormat() ch := make(chan *dumpedResult, 100) wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() - - txn := &eTxn{ - en: m.en, - opt: &bTxnOption{ - coNum: opt.CoNum, - maxRetry: 1, - maxStmtRetry: 3, - }, - } - err := m.en.execETxn(ctx, txn, func(ctx Context, txn *eTxn) error { - for typ := SegTypeFormat; typ < SegTypeMax; typ++ { - seg := m.en.buildDumpedSeg(typ, opt, txn) - if seg != nil { - if err := seg.dump(ctx, ch); err != nil { - return fmt.Errorf("dump %s err: %w", seg, err) - } - } - } - return nil - }) + err := m.en.dump(ctx, opt, ch) if err != nil { + logger.Errorf("dump meta err: %v", err) ctx.Cancel() } else { close(ch) @@ -3130,17 +3111,20 @@ func (m *baseMeta) DumpMetaV2(ctx Context, w io.Writer, opt *DumpOption) error { if res == nil { break } - if err := bak.WriteSegment(w, &BakSegment{Val: 
res.msg}); err != nil { - logger.Errorf("write %s err: %v", res.seg, err) + seg := &bakSegment{val: res.msg} + if err := bak.writeSegment(w, seg); err != nil { + logger.Errorf("write %d err: %v", seg.typ, err) ctx.Cancel() wg.Wait() return err } - res.seg.release(res.msg) + if res.release != nil { + res.release(res.msg) + } } wg.Wait() - return bak.WriteFooter(w) + return bak.writeFooter(w) } func (m *baseMeta) LoadMetaV2(ctx Context, r io.Reader, opt *LoadOption) error { @@ -3152,8 +3136,8 @@ func (m *baseMeta) LoadMetaV2(ctx Context, r io.Reader, opt *LoadOption) error { } type task struct { + typ int msg proto.Message - seg iLoadedSeg } var wg sync.WaitGroup @@ -3171,24 +3155,25 @@ func (m *baseMeta) LoadMetaV2(ctx Context, r io.Reader, opt *LoadOption) error { if task == nil { break } - if err := task.seg.load(ctx, task.msg); err != nil { - logger.Errorf("failed to insert %s: %s", task.seg, err) + err := m.en.load(ctx, task.typ, opt, task.msg) + if err != nil { + logger.Errorf("failed to insert %d: %s", task.typ, err) ctx.Cancel() return } } } - for i := 0; i < opt.CoNum; i++ { + for i := 0; i < opt.Threads; i++ { wg.Add(1) go workerFunc(ctx, taskCh) } - bak := &BakFormat{} + bak := &bakFormat{} for { - seg, err := bak.ReadSegment(r) + seg, err := bak.readSegment(r) if err != nil { - if errors.Is(err, ErrBakEOF) { + if errors.Is(err, errBakEOF) { close(taskCh) break } @@ -3197,12 +3182,11 @@ func (m *baseMeta) LoadMetaV2(ctx Context, r io.Reader, opt *LoadOption) error { return err } - ls := m.en.buildLoadedSeg(int(seg.Typ), opt) select { case <-ctx.Done(): wg.Wait() return ctx.Err() - case taskCh <- &task{seg.Val, ls}: + case taskCh <- &task{int(seg.typ), seg.val}: } } wg.Wait() diff --git a/pkg/meta/load_dump_test.go b/pkg/meta/load_dump_test.go index fed5c18d1c71..14b2c5652cc3 100644 --- a/pkg/meta/load_dump_test.go +++ b/pkg/meta/load_dump_test.go @@ -18,6 +18,7 @@ package meta import ( "bytes" + "context" "fmt" "io" "os" @@ -337,7 +338,7 @@ func TestLoadDump(t *testing.T) { //skip mutate func testDumpV2(t *testing.T, m Meta, result string, opt *DumpOption) { if opt == nil { - opt = &DumpOption{CoNum: 10, KeepSecret: true} + opt = &DumpOption{Threads: 10, KeepSecret: true} } fp, err := os.OpenFile(result, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { @@ -347,7 +348,7 @@ func testDumpV2(t *testing.T, m Meta, result string, opt *DumpOption) { if _, err = m.Load(true); err != nil { t.Fatalf("load setting: %s", err) } - if err = m.DumpMetaV2(Background, fp, opt); err != nil { + if err = m.DumpMetaV2(WrapContext(context.Background()), fp, opt); err != nil { t.Fatalf("dump meta: %s", err) } fp.Sync() @@ -363,7 +364,7 @@ func testLoadV2(t *testing.T, uri, fname string) Meta { t.Fatalf("open file: %s", fname) } defer fp.Close() - if err = m.LoadMetaV2(Background, fp, &LoadOption{CoNum: 10}); err != nil { + if err = m.LoadMetaV2(WrapContext(context.Background()), fp, &LoadOption{Threads: 10}); err != nil { t.Fatalf("load meta: %s", err) } if _, err := m.Load(true); err != nil { @@ -400,7 +401,8 @@ func TestLoadDumpV2(t *testing.T) { logger.SetLevel(logrus.DebugLevel) engines := map[string][]string{ - "mysql": {"mysql://root:@/dev", "mysql://root:@/dev2"}, + "sqlite3": {"sqlite3://dev.db", "sqlite3://dev2.db"}, + // "mysql": {"mysql://root:@/dev", "mysql://root:@/dev2"}, // "redis": {"redis://127.0.0.1:6379/2", "redis://127.0.0.1:6379/3"}, // "tikv": {"tikv://127.0.0.1:2379/jfs-load-dump-1", "tikv://127.0.0.1:2379/jfs-load-dump-2"}, } @@ -454,13 +456,13 @@ func 
TestLoadDump_MemKV(t *testing.T) { func testSecretAndTrash(t *testing.T, addr, addr2 string) { m := testLoad(t, addr, sampleFile) - testDumpV2(t, m, "sqlite-secret.dump", &DumpOption{CoNum: 10, KeepSecret: true}) + testDumpV2(t, m, "sqlite-secret.dump", &DumpOption{Threads: 10, KeepSecret: true}) m2 := testLoadV2(t, addr2, "sqlite-secret.dump") if m2.GetFormat().EncryptKey != m.GetFormat().EncryptKey { t.Fatalf("encrypt key not valid: %s", m2.GetFormat().EncryptKey) } - testDumpV2(t, m, "sqlite-non-secret.dump", &DumpOption{CoNum: 10, KeepSecret: false}) + testDumpV2(t, m, "sqlite-non-secret.dump", &DumpOption{Threads: 10, KeepSecret: false}) m2.Reset() m2 = testLoadV2(t, addr2, "sqlite-non-secret.dump") if m2.GetFormat().EncryptKey != "removed" { @@ -481,7 +483,7 @@ func testSecretAndTrash(t *testing.T, addr, addr2 string) { return false, nil }) if cnt != len(trashs) { - t.Fatalf("trash count: %d", cnt) + t.Fatalf("trash count: %d != %d", cnt, len(trashs)) } } diff --git a/pkg/meta/pb/backup.pb.go b/pkg/meta/pb/backup.pb.go index 66cb8daecb24..aac0a7cbd792 100644 --- a/pkg/meta/pb/backup.pb.go +++ b/pkg/meta/pb/backup.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 +// protoc-gen-go v1.35.2 +// protoc v5.29.0 // source: pkg/meta/pb/backup.proto package pb @@ -65,33 +65,29 @@ func (x *Format) GetData() []byte { return nil } -type Counters struct { +type Counter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - UsedSpace int64 `protobuf:"varint,1,opt,name=usedSpace,proto3" json:"usedSpace,omitempty"` - UsedInodes int64 `protobuf:"varint,2,opt,name=usedInodes,proto3" json:"usedInodes,omitempty"` - NextInode int64 `protobuf:"varint,3,opt,name=nextInode,proto3" json:"nextInode,omitempty"` - NextChunk int64 `protobuf:"varint,4,opt,name=nextChunk,proto3" json:"nextChunk,omitempty"` - NextSession int64 `protobuf:"varint,5,opt,name=nextSession,proto3" json:"nextSession,omitempty"` - NextTrash int64 `protobuf:"varint,6,opt,name=nextTrash,proto3" json:"nextTrash,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` } -func (x *Counters) Reset() { - *x = Counters{} +func (x *Counter) Reset() { + *x = Counter{} mi := &file_pkg_meta_pb_backup_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *Counters) String() string { +func (x *Counter) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Counters) ProtoMessage() {} +func (*Counter) ProtoMessage() {} -func (x *Counters) ProtoReflect() protoreflect.Message { +func (x *Counter) ProtoReflect() protoreflect.Message { mi := &file_pkg_meta_pb_backup_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -103,49 +99,21 @@ func (x *Counters) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Counters.ProtoReflect.Descriptor instead. -func (*Counters) Descriptor() ([]byte, []int) { +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{1} } -func (x *Counters) GetUsedSpace() int64 { +func (x *Counter) GetKey() string { if x != nil { - return x.UsedSpace - } - return 0 -} - -func (x *Counters) GetUsedInodes() int64 { - if x != nil { - return x.UsedInodes - } - return 0 -} - -func (x *Counters) GetNextInode() int64 { - if x != nil { - return x.NextInode - } - return 0 -} - -func (x *Counters) GetNextChunk() int64 { - if x != nil { - return x.NextChunk - } - return 0 -} - -func (x *Counters) GetNextSession() int64 { - if x != nil { - return x.NextSession + return x.Key } - return 0 + return "" } -func (x *Counters) GetNextTrash() int64 { +func (x *Counter) GetValue() int64 { if x != nil { - return x.NextTrash + return x.Value } return 0 } @@ -203,51 +171,6 @@ func (x *Sustained) GetInodes() []uint64 { return nil } -type SustainedList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Sustained `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *SustainedList) Reset() { - *x = SustainedList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SustainedList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SustainedList) ProtoMessage() {} - -func (x *SustainedList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SustainedList.ProtoReflect.Descriptor instead. -func (*SustainedList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{3} -} - -func (x *SustainedList) GetList() []*Sustained { - if x != nil { - return x.List - } - return nil -} - type DelFile struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -260,7 +183,7 @@ type DelFile struct { func (x *DelFile) Reset() { *x = DelFile{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[4] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -272,7 +195,7 @@ func (x *DelFile) String() string { func (*DelFile) ProtoMessage() {} func (x *DelFile) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[4] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -285,7 +208,7 @@ func (x *DelFile) ProtoReflect() protoreflect.Message { // Deprecated: Use DelFile.ProtoReflect.Descriptor instead. 
func (*DelFile) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{4} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{3} } func (x *DelFile) GetInode() uint64 { @@ -309,51 +232,6 @@ func (x *DelFile) GetExpire() int64 { return 0 } -type DelFileList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*DelFile `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *DelFileList) Reset() { - *x = DelFileList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DelFileList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DelFileList) ProtoMessage() {} - -func (x *DelFileList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DelFileList.ProtoReflect.Descriptor instead. -func (*DelFileList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{5} -} - -func (x *DelFileList) GetList() []*DelFile { - if x != nil { - return x.List - } - return nil -} - type SliceRef struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -366,7 +244,7 @@ type SliceRef struct { func (x *SliceRef) Reset() { *x = SliceRef{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[6] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -378,7 +256,7 @@ func (x *SliceRef) String() string { func (*SliceRef) ProtoMessage() {} func (x *SliceRef) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[6] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -391,7 +269,7 @@ func (x *SliceRef) ProtoReflect() protoreflect.Message { // Deprecated: Use SliceRef.ProtoReflect.Descriptor instead. func (*SliceRef) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{6} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{4} } func (x *SliceRef) GetId() uint64 { @@ -415,51 +293,6 @@ func (x *SliceRef) GetRefs() int64 { return 0 } -type SliceRefList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*SliceRef `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *SliceRefList) Reset() { - *x = SliceRefList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SliceRefList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SliceRefList) ProtoMessage() {} - -func (x *SliceRefList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SliceRefList.ProtoReflect.Descriptor instead. 
-func (*SliceRefList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{7} -} - -func (x *SliceRefList) GetList() []*SliceRef { - if x != nil { - return x.List - } - return nil -} - type Acl struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -471,7 +304,7 @@ type Acl struct { func (x *Acl) Reset() { *x = Acl{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[8] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -483,7 +316,7 @@ func (x *Acl) String() string { func (*Acl) ProtoMessage() {} func (x *Acl) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[8] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -496,7 +329,7 @@ func (x *Acl) ProtoReflect() protoreflect.Message { // Deprecated: Use Acl.ProtoReflect.Descriptor instead. func (*Acl) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{8} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{5} } func (x *Acl) GetId() uint32 { @@ -513,51 +346,6 @@ func (x *Acl) GetData() []byte { return nil } -type AclList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Acl `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *AclList) Reset() { - *x = AclList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AclList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AclList) ProtoMessage() {} - -func (x *AclList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AclList.ProtoReflect.Descriptor instead. -func (*AclList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{9} -} - -func (x *AclList) GetList() []*Acl { - if x != nil { - return x.List - } - return nil -} - type Xattr struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -570,7 +358,7 @@ type Xattr struct { func (x *Xattr) Reset() { *x = Xattr{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[10] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -582,7 +370,7 @@ func (x *Xattr) String() string { func (*Xattr) ProtoMessage() {} func (x *Xattr) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[10] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -595,7 +383,7 @@ func (x *Xattr) ProtoReflect() protoreflect.Message { // Deprecated: Use Xattr.ProtoReflect.Descriptor instead. 
func (*Xattr) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{10} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{6} } func (x *Xattr) GetInode() uint64 { @@ -619,51 +407,6 @@ func (x *Xattr) GetValue() []byte { return nil } -type XattrList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Xattr `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *XattrList) Reset() { - *x = XattrList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *XattrList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XattrList) ProtoMessage() {} - -func (x *XattrList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XattrList.ProtoReflect.Descriptor instead. -func (*XattrList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{11} -} - -func (x *XattrList) GetList() []*Xattr { - if x != nil { - return x.List - } - return nil -} - type Quota struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -678,7 +421,7 @@ type Quota struct { func (x *Quota) Reset() { *x = Quota{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[12] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -690,7 +433,7 @@ func (x *Quota) String() string { func (*Quota) ProtoMessage() {} func (x *Quota) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[12] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -703,7 +446,7 @@ func (x *Quota) ProtoReflect() protoreflect.Message { // Deprecated: Use Quota.ProtoReflect.Descriptor instead. func (*Quota) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{12} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{7} } func (x *Quota) GetInode() uint64 { @@ -741,51 +484,6 @@ func (x *Quota) GetUsedInodes() int64 { return 0 } -type QuotaList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Quota `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *QuotaList) Reset() { - *x = QuotaList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *QuotaList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QuotaList) ProtoMessage() {} - -func (x *QuotaList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QuotaList.ProtoReflect.Descriptor instead. 
-func (*QuotaList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{13} -} - -func (x *QuotaList) GetList() []*Quota { - if x != nil { - return x.List - } - return nil -} - type Stat struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -799,7 +497,7 @@ type Stat struct { func (x *Stat) Reset() { *x = Stat{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[14] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -811,7 +509,7 @@ func (x *Stat) String() string { func (*Stat) ProtoMessage() {} func (x *Stat) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[14] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -824,7 +522,7 @@ func (x *Stat) ProtoReflect() protoreflect.Message { // Deprecated: Use Stat.ProtoReflect.Descriptor instead. func (*Stat) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{14} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{8} } func (x *Stat) GetInode() uint64 { @@ -855,51 +553,6 @@ func (x *Stat) GetUsedInodes() int64 { return 0 } -type StatList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Stat `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *StatList) Reset() { - *x = StatList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatList) ProtoMessage() {} - -func (x *StatList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatList.ProtoReflect.Descriptor instead. -func (*StatList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{15} -} - -func (x *StatList) GetList() []*Stat { - if x != nil { - return x.List - } - return nil -} - type Node struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -911,7 +564,7 @@ type Node struct { func (x *Node) Reset() { *x = Node{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[16] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -923,7 +576,7 @@ func (x *Node) String() string { func (*Node) ProtoMessage() {} func (x *Node) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[16] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -936,7 +589,7 @@ func (x *Node) ProtoReflect() protoreflect.Message { // Deprecated: Use Node.ProtoReflect.Descriptor instead. 
func (*Node) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{16} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{9} } func (x *Node) GetInode() uint64 { @@ -953,51 +606,6 @@ func (x *Node) GetData() []byte { return nil } -type NodeList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Node `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *NodeList) Reset() { - *x = NodeList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *NodeList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeList) ProtoMessage() {} - -func (x *NodeList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeList.ProtoReflect.Descriptor instead. -func (*NodeList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{17} -} - -func (x *NodeList) GetList() []*Node { - if x != nil { - return x.List - } - return nil -} - type Edge struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1011,7 +619,7 @@ type Edge struct { func (x *Edge) Reset() { *x = Edge{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[18] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1023,7 +631,7 @@ func (x *Edge) String() string { func (*Edge) ProtoMessage() {} func (x *Edge) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[18] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1036,7 +644,7 @@ func (x *Edge) ProtoReflect() protoreflect.Message { // Deprecated: Use Edge.ProtoReflect.Descriptor instead. func (*Edge) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{18} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{10} } func (x *Edge) GetParent() uint64 { @@ -1067,51 +675,6 @@ func (x *Edge) GetType() uint32 { return 0 } -type EdgeList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Edge `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *EdgeList) Reset() { - *x = EdgeList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EdgeList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EdgeList) ProtoMessage() {} - -func (x *EdgeList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EdgeList.ProtoReflect.Descriptor instead. 
-func (*EdgeList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{19} -} - -func (x *EdgeList) GetList() []*Edge { - if x != nil { - return x.List - } - return nil -} - // for redis and tikv only type Parent struct { state protoimpl.MessageState @@ -1125,7 +688,7 @@ type Parent struct { func (x *Parent) Reset() { *x = Parent{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[20] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1137,7 +700,7 @@ func (x *Parent) String() string { func (*Parent) ProtoMessage() {} func (x *Parent) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[20] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1150,7 +713,7 @@ func (x *Parent) ProtoReflect() protoreflect.Message { // Deprecated: Use Parent.ProtoReflect.Descriptor instead. func (*Parent) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{20} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{11} } func (x *Parent) GetInode() uint64 { @@ -1174,51 +737,6 @@ func (x *Parent) GetCnt() int64 { return 0 } -type ParentList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - List []*Parent `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` -} - -func (x *ParentList) Reset() { - *x = ParentList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ParentList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParentList) ProtoMessage() {} - -func (x *ParentList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParentList.ProtoReflect.Descriptor instead. -func (*ParentList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{21} -} - -func (x *ParentList) GetList() []*Parent { - if x != nil { - return x.List - } - return nil -} - type Chunk struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1231,7 +749,7 @@ type Chunk struct { func (x *Chunk) Reset() { *x = Chunk{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[22] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1243,7 +761,7 @@ func (x *Chunk) String() string { func (*Chunk) ProtoMessage() {} func (x *Chunk) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[22] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1256,7 +774,7 @@ func (x *Chunk) ProtoReflect() protoreflect.Message { // Deprecated: Use Chunk.ProtoReflect.Descriptor instead. 
func (*Chunk) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{22} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{12} } func (x *Chunk) GetInode() uint64 { @@ -1280,29 +798,30 @@ func (x *Chunk) GetSlices() []byte { return nil } -type ChunkList struct { +type Symlink struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - List []*Chunk `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` + Inode uint64 `protobuf:"varint,1,opt,name=inode,proto3" json:"inode,omitempty"` + Target []byte `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` } -func (x *ChunkList) Reset() { - *x = ChunkList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[23] +func (x *Symlink) Reset() { + *x = Symlink{} + mi := &file_pkg_meta_pb_backup_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *ChunkList) String() string { +func (x *Symlink) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ChunkList) ProtoMessage() {} +func (*Symlink) ProtoMessage() {} -func (x *ChunkList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[23] +func (x *Symlink) ProtoReflect() protoreflect.Message { + mi := &file_pkg_meta_pb_backup_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1313,42 +832,60 @@ func (x *ChunkList) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ChunkList.ProtoReflect.Descriptor instead. -func (*ChunkList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{23} +// Deprecated: Use Symlink.ProtoReflect.Descriptor instead. 
+func (*Symlink) Descriptor() ([]byte, []int) { + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{13} +} + +func (x *Symlink) GetInode() uint64 { + if x != nil { + return x.Inode + } + return 0 } -func (x *ChunkList) GetList() []*Chunk { +func (x *Symlink) GetTarget() []byte { if x != nil { - return x.List + return x.Target } return nil } -type Symlink struct { +type Batch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Inode uint64 `protobuf:"varint,1,opt,name=inode,proto3" json:"inode,omitempty"` - Target []byte `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` -} - -func (x *Symlink) Reset() { - *x = Symlink{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[24] + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + Edges []*Edge `protobuf:"bytes,2,rep,name=edges,proto3" json:"edges,omitempty"` + Chunks []*Chunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` + SliceRefs []*SliceRef `protobuf:"bytes,4,rep,name=sliceRefs,proto3" json:"sliceRefs,omitempty"` + Xattrs []*Xattr `protobuf:"bytes,5,rep,name=xattrs,proto3" json:"xattrs,omitempty"` + Parents []*Parent `protobuf:"bytes,6,rep,name=parents,proto3" json:"parents,omitempty"` + Symlinks []*Symlink `protobuf:"bytes,7,rep,name=symlinks,proto3" json:"symlinks,omitempty"` + Sustained []*Sustained `protobuf:"bytes,8,rep,name=sustained,proto3" json:"sustained,omitempty"` + Delfiles []*DelFile `protobuf:"bytes,9,rep,name=delfiles,proto3" json:"delfiles,omitempty"` + Dirstats []*Stat `protobuf:"bytes,10,rep,name=dirstats,proto3" json:"dirstats,omitempty"` + Quotas []*Quota `protobuf:"bytes,11,rep,name=quotas,proto3" json:"quotas,omitempty"` + Acls []*Acl `protobuf:"bytes,12,rep,name=acls,proto3" json:"acls,omitempty"` + Counters []*Counter `protobuf:"bytes,13,rep,name=counters,proto3" json:"counters,omitempty"` +} + +func (x *Batch) Reset() { + *x = Batch{} + mi := &file_pkg_meta_pb_backup_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *Symlink) String() string { +func (x *Batch) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Symlink) ProtoMessage() {} +func (*Batch) ProtoMessage() {} -func (x *Symlink) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[24] +func (x *Batch) ProtoReflect() protoreflect.Message { + mi := &file_pkg_meta_pb_backup_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1359,66 +896,98 @@ func (x *Symlink) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Symlink.ProtoReflect.Descriptor instead. -func (*Symlink) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{24} +// Deprecated: Use Batch.ProtoReflect.Descriptor instead. 
+func (*Batch) Descriptor() ([]byte, []int) { + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{14} } -func (x *Symlink) GetInode() uint64 { +func (x *Batch) GetNodes() []*Node { if x != nil { - return x.Inode + return x.Nodes } - return 0 + return nil } -func (x *Symlink) GetTarget() []byte { +func (x *Batch) GetEdges() []*Edge { if x != nil { - return x.Target + return x.Edges } return nil } -type SymlinkList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Batch) GetChunks() []*Chunk { + if x != nil { + return x.Chunks + } + return nil +} - List []*Symlink `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +func (x *Batch) GetSliceRefs() []*SliceRef { + if x != nil { + return x.SliceRefs + } + return nil } -func (x *SymlinkList) Reset() { - *x = SymlinkList{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Batch) GetXattrs() []*Xattr { + if x != nil { + return x.Xattrs + } + return nil } -func (x *SymlinkList) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *Batch) GetParents() []*Parent { + if x != nil { + return x.Parents + } + return nil } -func (*SymlinkList) ProtoMessage() {} +func (x *Batch) GetSymlinks() []*Symlink { + if x != nil { + return x.Symlinks + } + return nil +} -func (x *SymlinkList) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[25] +func (x *Batch) GetSustained() []*Sustained { if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms + return x.Sustained } - return mi.MessageOf(x) + return nil +} + +func (x *Batch) GetDelfiles() []*DelFile { + if x != nil { + return x.Delfiles + } + return nil } -// Deprecated: Use SymlinkList.ProtoReflect.Descriptor instead. -func (*SymlinkList) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{25} +func (x *Batch) GetDirstats() []*Stat { + if x != nil { + return x.Dirstats + } + return nil } -func (x *SymlinkList) GetList() []*Symlink { +func (x *Batch) GetQuotas() []*Quota { if x != nil { - return x.List + return x.Quotas + } + return nil +} + +func (x *Batch) GetAcls() []*Acl { + if x != nil { + return x.Acls + } + return nil +} + +func (x *Batch) GetCounters() []*Counter { + if x != nil { + return x.Counters } return nil } @@ -1435,7 +1004,7 @@ type Footer struct { func (x *Footer) Reset() { *x = Footer{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[26] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1447,7 +1016,7 @@ func (x *Footer) String() string { func (*Footer) ProtoMessage() {} func (x *Footer) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[26] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,7 +1029,7 @@ func (x *Footer) ProtoReflect() protoreflect.Message { // Deprecated: Use Footer.ProtoReflect.Descriptor instead. 
func (*Footer) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{26} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{15} } func (x *Footer) GetMagic() uint32 { @@ -1495,7 +1064,7 @@ type Footer_SegInfo struct { func (x *Footer_SegInfo) Reset() { *x = Footer_SegInfo{} - mi := &file_pkg_meta_pb_backup_proto_msgTypes[27] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1507,7 +1076,7 @@ func (x *Footer_SegInfo) String() string { func (*Footer_SegInfo) ProtoMessage() {} func (x *Footer_SegInfo) ProtoReflect() protoreflect.Message { - mi := &file_pkg_meta_pb_backup_proto_msgTypes[27] + mi := &file_pkg_meta_pb_backup_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1520,7 +1089,7 @@ func (x *Footer_SegInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use Footer_SegInfo.ProtoReflect.Descriptor instead. func (*Footer_SegInfo) Descriptor() ([]byte, []int) { - return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{26, 0} + return file_pkg_meta_pb_backup_proto_rawDescGZIP(), []int{15, 0} } func (x *Footer_SegInfo) GetOffset() []uint64 { @@ -1543,128 +1112,115 @@ var file_pkg_meta_pb_backup_proto_rawDesc = []byte{ 0x0a, 0x18, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x1c, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc4, 0x01, 0x0a, - 0x08, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, - 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x75, 0x73, - 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x75, 0x73, 0x65, - 0x64, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x49, - 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, - 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x43, 0x68, 0x75, - 0x6e, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x43, 0x68, - 0x75, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x72, 0x61, - 0x73, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x72, - 0x61, 0x73, 0x68, 0x22, 0x35, 0x0a, 0x09, 0x53, 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, - 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, - 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x04, 0x52, 0x06, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x0d, 0x53, 0x75, - 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x6c, - 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x52, 
0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x4f, - 0x0a, 0x07, 0x44, 0x65, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x22, - 0x2e, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, - 0x42, 0x0a, 0x08, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x72, - 0x65, 0x66, 0x73, 0x22, 0x30, 0x0a, 0x0c, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, 0x66, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, 0x66, 0x52, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x03, 0x41, 0x63, 0x6c, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x26, 0x0a, 0x07, 0x41, 0x63, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6c, - 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x70, 0x62, 0x2e, 0x41, - 0x63, 0x6c, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x47, 0x0a, 0x05, 0x58, 0x61, 0x74, 0x74, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x2a, 0x0a, 0x09, 0x58, 0x61, 0x74, 0x74, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, - 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, - 0x62, 0x2e, 0x58, 0x61, 0x74, 0x74, 0x72, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x95, 0x01, - 0x0a, 0x05, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x6d, 0x61, 0x78, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x08, 0x6d, 0x61, 0x78, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x61, 0x78, - 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, - 0x78, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 
0x49, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x2a, 0x0a, 0x09, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, 0x04, 0x6c, 0x69, 0x73, - 0x74, 0x22, 0x7a, 0x0a, 0x04, 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x31, 0x0a, 0x07, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x35, 0x0a, 0x09, 0x53, 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x73, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, + 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x07, 0x44, 0x65, 0x6c, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x22, 0x42, 0x0a, 0x08, 0x53, 0x6c, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x22, 0x29, 0x0a, 0x03, 0x41, + 0x63, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x05, 0x58, 0x61, 0x74, 0x74, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x95, 0x01, 0x0a, 0x05, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, - 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, - 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, - 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 
0x52, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x28, 0x0a, - 0x08, 0x53, 0x74, 0x61, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x6c, 0x69, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, + 0x61, 0x78, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, + 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x75, 0x73, + 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, + 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x64, 0x49, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x7a, 0x0a, 0x04, 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x28, 0x0a, 0x08, 0x4e, 0x6f, 0x64, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6c, - 0x69, 0x73, 0x74, 0x22, 0x5c, 0x0a, 0x04, 0x45, 0x64, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x28, 0x0a, 0x08, 0x45, 0x64, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x0a, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, - 0x2e, 0x45, 0x64, 0x67, 0x65, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x48, 0x0a, 0x06, 0x50, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x63, 0x6e, 0x74, 0x22, 0x2c, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x6c, - 0x69, 0x73, 0x74, 0x22, 0x4b, 0x0a, 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, - 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x05, 0x69, 0x6e, 
0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6c, 0x69, 0x63, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, - 0x22, 0x2a, 0x0a, 0x09, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x37, 0x0a, 0x07, - 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x2e, 0x0a, 0x0b, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x06, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x67, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x6d, 0x61, 0x67, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x66, 0x6f, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x33, 0x0a, - 0x07, 0x53, 0x65, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6e, - 0x75, 0x6d, 0x1a, 0x4c, 0x0a, 0x0a, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x65, - 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x49, 0x6e, 0x6f, + 0x64, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, + 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x5c, 0x0a, 0x04, 
0x45, 0x64, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x22, 0x48, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x63, 0x6e, 0x74, 0x22, 0x4b, 0x0a, + 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x07, 0x53, 0x79, + 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x22, 0xed, 0x03, 0x0a, 0x05, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1e, 0x0a, + 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, + 0x62, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x05, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, + 0x62, 0x2e, 0x45, 0x64, 0x67, 0x65, 0x52, 0x05, 0x65, 0x64, 0x67, 0x65, 0x73, 0x12, 0x21, 0x0a, + 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, + 0x70, 0x62, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, + 0x12, 0x2a, 0x0a, 0x09, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, 0x66, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x66, 0x52, 0x09, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x65, 0x66, 0x73, 0x12, 0x21, 0x0a, 0x06, + 0x78, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, + 0x62, 0x2e, 0x58, 0x61, 0x74, 0x74, 0x72, 0x52, 0x06, 0x78, 0x61, 0x74, 0x74, 0x72, 0x73, 0x12, + 0x24, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x07, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x79, 0x6d, + 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x08, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2b, + 0x0a, 0x09, 0x73, 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x08, 
0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, + 0x52, 0x09, 0x73, 0x75, 0x73, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x27, 0x0a, 0x08, 0x64, + 0x65, 0x6c, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x64, 0x65, 0x6c, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x08, 0x64, 0x69, 0x72, 0x73, 0x74, 0x61, 0x74, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x52, 0x08, 0x64, 0x69, 0x72, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x06, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x52, 0x06, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x73, 0x12, 0x1b, 0x0a, + 0x04, 0x61, 0x63, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x70, 0x62, + 0x2e, 0x41, 0x63, 0x6c, 0x52, 0x04, 0x61, 0x63, 0x6c, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x73, 0x22, 0xd6, 0x01, 0x0a, 0x06, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x67, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, + 0x61, 0x67, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, + 0x0a, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x70, 0x62, 0x2e, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x21, 0x0a, 0x07, 0x53, + 0x65, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x1a, 0x4c, + 0x0a, 0x0a, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x28, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x70, 0x62, 0x2e, 0x46, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x67, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x06, 0x5a, 0x04, + 0x2e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1679,58 +1235,48 @@ func file_pkg_meta_pb_backup_proto_rawDescGZIP() []byte { return file_pkg_meta_pb_backup_proto_rawDescData } -var file_pkg_meta_pb_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_pkg_meta_pb_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_pkg_meta_pb_backup_proto_goTypes = []any{ (*Format)(nil), // 0: pb.Format - (*Counters)(nil), // 1: pb.Counters + (*Counter)(nil), // 1: pb.Counter (*Sustained)(nil), // 2: pb.Sustained - (*SustainedList)(nil), // 3: pb.SustainedList - (*DelFile)(nil), // 4: pb.DelFile - (*DelFileList)(nil), // 5: pb.DelFileList - (*SliceRef)(nil), // 6: pb.SliceRef - (*SliceRefList)(nil), // 7: pb.SliceRefList - (*Acl)(nil), // 8: pb.Acl - (*AclList)(nil), // 9: pb.AclList - (*Xattr)(nil), // 10: pb.Xattr - 
(*XattrList)(nil), // 11: pb.XattrList - (*Quota)(nil), // 12: pb.Quota - (*QuotaList)(nil), // 13: pb.QuotaList - (*Stat)(nil), // 14: pb.Stat - (*StatList)(nil), // 15: pb.StatList - (*Node)(nil), // 16: pb.Node - (*NodeList)(nil), // 17: pb.NodeList - (*Edge)(nil), // 18: pb.Edge - (*EdgeList)(nil), // 19: pb.EdgeList - (*Parent)(nil), // 20: pb.Parent - (*ParentList)(nil), // 21: pb.ParentList - (*Chunk)(nil), // 22: pb.Chunk - (*ChunkList)(nil), // 23: pb.ChunkList - (*Symlink)(nil), // 24: pb.Symlink - (*SymlinkList)(nil), // 25: pb.SymlinkList - (*Footer)(nil), // 26: pb.Footer - (*Footer_SegInfo)(nil), // 27: pb.Footer.SegInfo - nil, // 28: pb.Footer.InfosEntry + (*DelFile)(nil), // 3: pb.DelFile + (*SliceRef)(nil), // 4: pb.SliceRef + (*Acl)(nil), // 5: pb.Acl + (*Xattr)(nil), // 6: pb.Xattr + (*Quota)(nil), // 7: pb.Quota + (*Stat)(nil), // 8: pb.Stat + (*Node)(nil), // 9: pb.Node + (*Edge)(nil), // 10: pb.Edge + (*Parent)(nil), // 11: pb.Parent + (*Chunk)(nil), // 12: pb.Chunk + (*Symlink)(nil), // 13: pb.Symlink + (*Batch)(nil), // 14: pb.Batch + (*Footer)(nil), // 15: pb.Footer + (*Footer_SegInfo)(nil), // 16: pb.Footer.SegInfo + nil, // 17: pb.Footer.InfosEntry } var file_pkg_meta_pb_backup_proto_depIdxs = []int32{ - 2, // 0: pb.SustainedList.list:type_name -> pb.Sustained - 4, // 1: pb.DelFileList.list:type_name -> pb.DelFile - 6, // 2: pb.SliceRefList.list:type_name -> pb.SliceRef - 8, // 3: pb.AclList.list:type_name -> pb.Acl - 10, // 4: pb.XattrList.list:type_name -> pb.Xattr - 12, // 5: pb.QuotaList.list:type_name -> pb.Quota - 14, // 6: pb.StatList.list:type_name -> pb.Stat - 16, // 7: pb.NodeList.list:type_name -> pb.Node - 18, // 8: pb.EdgeList.list:type_name -> pb.Edge - 20, // 9: pb.ParentList.list:type_name -> pb.Parent - 22, // 10: pb.ChunkList.list:type_name -> pb.Chunk - 24, // 11: pb.SymlinkList.list:type_name -> pb.Symlink - 28, // 12: pb.Footer.infos:type_name -> pb.Footer.InfosEntry - 27, // 13: pb.Footer.InfosEntry.value:type_name -> pb.Footer.SegInfo - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 9, // 0: pb.Batch.nodes:type_name -> pb.Node + 10, // 1: pb.Batch.edges:type_name -> pb.Edge + 12, // 2: pb.Batch.chunks:type_name -> pb.Chunk + 4, // 3: pb.Batch.sliceRefs:type_name -> pb.SliceRef + 6, // 4: pb.Batch.xattrs:type_name -> pb.Xattr + 11, // 5: pb.Batch.parents:type_name -> pb.Parent + 13, // 6: pb.Batch.symlinks:type_name -> pb.Symlink + 2, // 7: pb.Batch.sustained:type_name -> pb.Sustained + 3, // 8: pb.Batch.delfiles:type_name -> pb.DelFile + 8, // 9: pb.Batch.dirstats:type_name -> pb.Stat + 7, // 10: pb.Batch.quotas:type_name -> pb.Quota + 5, // 11: pb.Batch.acls:type_name -> pb.Acl + 1, // 12: pb.Batch.counters:type_name -> pb.Counter + 17, // 13: pb.Footer.infos:type_name -> pb.Footer.InfosEntry + 16, // 14: pb.Footer.InfosEntry.value:type_name -> pb.Footer.SegInfo + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_pkg_meta_pb_backup_proto_init() } @@ -1744,7 +1290,7 @@ func file_pkg_meta_pb_backup_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_pkg_meta_pb_backup_proto_rawDesc, NumEnums: 0, - NumMessages: 29, + NumMessages: 18, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/meta/pb/backup.proto b/pkg/meta/pb/backup.proto index a9d365164c6f..7c90d4b02f59 100644 --- a/pkg/meta/pb/backup.proto +++ b/pkg/meta/pb/backup.proto @@ -12,13 +12,9 @@ message Format { bytes data = 1; // meta.Format's json format } -message Counters { - int64 usedSpace = 1; - int64 usedInodes = 2; - int64 nextInode = 3; - int64 nextChunk = 4; - int64 nextSession = 5; - int64 nextTrash = 6; +message Counter { + string key = 1; + int64 value = 2; } message Sustained { @@ -26,49 +22,29 @@ message Sustained { repeated uint64 inodes = 2; } -message SustainedList { - repeated Sustained list = 1; -} - message DelFile { uint64 inode = 1; uint64 length = 2; int64 expire = 3; } -message DelFileList { - repeated DelFile list = 1; -} - message SliceRef { uint64 id = 1; uint32 size = 2; int64 refs = 3; } -message SliceRefList { - repeated SliceRef list = 1; -} - message Acl { uint32 id = 1; bytes data = 2; // acl.Rule's binary format } -message AclList { - repeated Acl list = 1; -} - message Xattr { uint64 inode = 1; string name = 2; bytes value = 3; } -message XattrList { - repeated Xattr list = 1; -} - message Quota { uint64 inode = 1; int64 maxSpace = 2; @@ -77,10 +53,6 @@ message Quota { int64 usedInodes = 5; } -message QuotaList { - repeated Quota list = 1; -} - message Stat { uint64 inode = 1; int64 dataLength = 2; @@ -88,19 +60,11 @@ message Stat { int64 usedInodes = 4; } -message StatList { - repeated Stat list = 1; -} - message Node { uint64 inode = 1; bytes data = 2; // meta.Attr's binary format } -message NodeList { - repeated Node list = 1; -} - message Edge { uint64 parent = 1; uint64 inode = 2; @@ -108,10 +72,6 @@ message Edge { uint32 type = 4; } -message EdgeList { - repeated Edge list = 1; -} - // for redis and tikv only message Parent { uint64 inode = 1; @@ -119,27 +79,31 @@ message Parent { int64 cnt = 3; } -message ParentList { - repeated Parent list = 1; -} - message Chunk { uint64 inode = 1; uint32 index = 2; bytes slices = 3; // array of meta.slice } -message ChunkList { - repeated Chunk list = 1; -} - message Symlink { uint64 inode = 1; bytes target = 2; } -message SymlinkList { - repeated Symlink list = 1; +message Batch { + repeated Node nodes = 1; + repeated Edge edges = 2; + repeated Chunk chunks = 3; + repeated SliceRef sliceRefs = 4; + repeated Xattr xattrs = 5; + repeated Parent parents = 6; + repeated Symlink symlinks = 7; + repeated Sustained sustained = 8; + repeated DelFile delfiles = 9; + repeated Stat dirstats = 10; + repeated Quota quotas = 11; + repeated Acl acls = 12; + repeated Counter counters = 13; } message Footer { diff --git a/pkg/meta/redis_bak.go b/pkg/meta/redis_bak.go index 19a8fe739ebd..7578cc5ded6e 100644 --- a/pkg/meta/redis_bak.go +++ b/pkg/meta/redis_bak.go @@ -19,17 +19,14 @@ package meta -func (m *redisMeta) buildDumpedSeg(typ int, opt *DumpOption, txn *eTxn) iDumpedSeg { - return nil -} +import "google.golang.org/protobuf/proto" -func (m *redisMeta) buildLoadedSeg(typ int, opt *LoadOption) iLoadedSeg { +func (m *redisMeta) dump(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { return nil } -func (m *redisMeta) execETxn(ctx Context, txn *eTxn, f func(Context, *eTxn) error) error { - txn.opt.notUsed = true - return f(ctx, txn) +func (m *redisMeta) load(ctx Context, typ int, opt *LoadOption, val proto.Message) error { + return nil } func (m *redisMeta) prepareLoad(ctx Context, opt *LoadOption) 
error { diff --git a/pkg/meta/sql.go b/pkg/meta/sql.go index ed98d1c8743b..b6a6e222fb8d 100644 --- a/pkg/meta/sql.go +++ b/pkg/meta/sql.go @@ -841,8 +841,13 @@ func (m *dbMeta) txn(f func(s *xorm.Session) error, inodes ...Ino) error { func (m *dbMeta) roTxn(ctx context.Context, f func(s *xorm.Session) error) error { start := time.Now() defer func() { m.txDist.Observe(time.Since(start).Seconds()) }() - s := m.db.NewSession() - defer s.Close() + var s *xorm.Session + if v := ctx.Value(txSessionKey{}); v != nil { + s = v.(*xorm.Session) + } else { + s = m.db.NewSession() + defer s.Close() + } var opt sql.TxOptions if !m.noReadOnlyTxn { opt.ReadOnly = true diff --git a/pkg/meta/sql_bak.go b/pkg/meta/sql_bak.go index 0bdde23202a7..90ab441e8702 100644 --- a/pkg/meta/sql_bak.go +++ b/pkg/meta/sql_bak.go @@ -24,11 +24,11 @@ import ( "fmt" "sync" "sync/atomic" - "time" aclAPI "github.com/juicedata/juicefs/pkg/acl" "github.com/juicedata/juicefs/pkg/meta/pb" "github.com/juicedata/juicefs/pkg/utils" + "github.com/pkg/errors" "golang.org/x/sync/errgroup" "google.golang.org/protobuf/proto" "xorm.io/xorm" @@ -38,169 +38,272 @@ var ( sqlDumpBatchSize = 100000 ) -func (m *dbMeta) buildDumpedSeg(typ int, opt *DumpOption, txn *eTxn) iDumpedSeg { - ds := dumpedSeg{typ: typ, meta: m, opt: opt, txn: txn} - switch typ { - case SegTypeFormat: - return &formatDS{ds} - case SegTypeCounter: - return &sqlCounterDS{ds} - case SegTypeSustained: - return &sqlSustainedDS{ds} - case SegTypeDelFile: - return &sqlDelFileDS{ds} - case SegTypeSliceRef: - return &sqlSliceRefDS{dumpedBatchSeg{ds, []*sync.Pool{{New: func() interface{} { return &pb.SliceRef{} }}}}} - case SegTypeAcl: - return &sqlAclDS{ds} - case SegTypeXattr: - return &sqlXattrDS{ds} - case SegTypeQuota: - return &sqlQuotaDS{ds} - case SegTypeStat: - return &sqlStatDS{ds} - case SegTypeNode: - return &sqlNodeDBS{dumpedBatchSeg{ds, []*sync.Pool{{New: func() interface{} { return &pb.Node{} }}}}} - case SegTypeChunk: - return &sqlChunkDBS{dumpedBatchSeg{ds, []*sync.Pool{{New: func() interface{} { return &pb.Chunk{} }}}}} - case SegTypeEdge: - return &sqlEdgeDBS{dumpedBatchSeg{ds, []*sync.Pool{{New: func() interface{} { return &pb.Edge{} }}}}, sync.Mutex{}} - case SegTypeParent: - return &sqlParentDS{ds} - case SegTypeSymlink: - return &sqlSymlinkDBS{dumpedBatchSeg{ds, []*sync.Pool{{New: func() interface{} { return &pb.Symlink{} }}}}} +func (m *dbMeta) dump(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + var dumps = []func(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error{ + m.dumpFormat, + m.dumpCounters, + m.dumpNodes, + m.dumpChunks, + m.dumpEdges, + m.dumpSymlinks, + m.dumpSustained, + m.dumpDelFiles, + m.dumpSliceRef, + m.dumpACL, + m.dumpXattr, + m.dumpQuota, + m.dumpDirStat, + } + ctx.WithValue(txMaxRetryKey{}, 3) + if opt.Threads == 1 { + // use same session for all dumps + sess := m.db.NewSession() + defer sess.Close() + ctx.WithValue(txSessionKey{}, sess) + } + for _, f := range dumps { + err := f(ctx, opt, ch) + if err != nil { + return err + } } return nil } -var sqlLoadedPoolOnce sync.Once -var sqlLoadedPools = make(map[int][]*sync.Pool) +func sqlQueryBatch(ctx Context, opt *DumpOption, maxId uint64, query func(ctx context.Context, start, end uint64) (int, error)) error { + eg, egCtx := errgroup.WithContext(ctx) + eg.SetLimit(opt.Threads) + + sum := int64(0) + batch := uint64(sqlDumpBatchSize) + for id := uint64(0); id <= maxId; id += batch { + startId := id + eg.Go(func() error { + n, err := query(egCtx, startId, 
startId+batch) + atomic.AddInt64(&sum, int64(n)) + return err + }) + } + logger.Debugf("dump %d rows", sum) + return eg.Wait() +} -func (m *dbMeta) buildLoadedPools(typ int) []*sync.Pool { - sqlLoadedPoolOnce.Do(func() { - sqlLoadedPools = map[int][]*sync.Pool{ - SegTypeNode: {{New: func() interface{} { return &node{} }}}, - SegTypeChunk: {{New: func() interface{} { return &chunk{} }}}, - SegTypeEdge: {{New: func() interface{} { return &edge{} }}}, - SegTypeSymlink: {{New: func() interface{} { return &symlink{} }}}, +func (m *dbMeta) dumpNodes(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + pool := sync.Pool{New: func() interface{} { return &pb.Node{} }} + release := func(p proto.Message) { + for _, s := range p.(*pb.Batch).Nodes { + pool.Put(s) } + } + + var rows []node + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Where("inode >= ?", TrashInode).Find(&rows) + }); err != nil { + return err + } + nodes := make([]*pb.Node, 0, len(rows)) + var attr Attr + for _, n := range rows { + pn := pool.Get().(*pb.Node) + pn.Inode = uint64(n.Inode) + m.parseAttr(&n, &attr) + pn.Data = m.marshal(&attr) + nodes = append(nodes, pn) + } + if err := dumpResult(ctx, ch, &dumpedResult{&pb.Batch{Nodes: nodes}, release}); err != nil { + return errors.Wrap(err, "dump trash nodes") + } + + var maxInode uint64 + err := m.roTxn(ctx, func(s *xorm.Session) error { + var row node + ok, err := s.Select("max(inode) as inode").Where("inode < ?", TrashInode).Get(&row) + if ok { + maxInode = uint64(row.Inode) + } + return err + }) + if err != nil { + return errors.Wrap(err, "max inode") + } + + return sqlQueryBatch(ctx, opt, maxInode, func(ctx context.Context, start, end uint64) (int, error) { + var rows []node + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Where("inode >= ? 
AND inode < ?", start, end).Find(&rows) + }); err != nil { + return 0, err + } + nodes := make([]*pb.Node, 0, len(rows)) + var attr Attr + for _, n := range rows { + pn := pool.Get().(*pb.Node) + pn.Inode = uint64(n.Inode) + m.parseAttr(&n, &attr) + pn.Data = m.marshal(&attr) + nodes = append(nodes, pn) + } + return len(rows), dumpResult(ctx, ch, &dumpedResult{&pb.Batch{Nodes: nodes}, release}) }) - return sqlLoadedPools[typ] } -func (m *dbMeta) buildLoadedSeg(typ int, opt *LoadOption) iLoadedSeg { - ls := loadedSeg{typ: typ, meta: m} - switch typ { - case SegTypeFormat: - return &sqlFormatLS{ls} - case SegTypeCounter: - return &sqlCounterLS{ls} - case SegTypeSustained: - return &sqlSustainedLS{ls} - case SegTypeDelFile: - return &sqlDelFileLS{ls} - case SegTypeSliceRef: - return &sqlSliceRefLS{ls} - case SegTypeAcl: - return &sqlAclLS{ls} - case SegTypeXattr: - return &sqlXattrLS{ls} - case SegTypeQuota: - return &sqlQuotaLS{ls} - case SegTypeStat: - return &sqlStatLS{ls} - case SegTypeNode: - return &sqlNodeLS{ls, m.buildLoadedPools(typ)} - case SegTypeChunk: - return &sqlChunkLS{ls, m.buildLoadedPools(typ)} - case SegTypeEdge: - return &sqlEdgeLS{ls, m.buildLoadedPools(typ)} - case SegTypeParent: - return &sqlParentLS{ls} - case SegTypeSymlink: - return &sqlSymlinkLS{ls, m.buildLoadedPools(typ)} +func (m *dbMeta) dumpChunks(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + pool := sync.Pool{New: func() interface{} { return &pb.Chunk{} }} + release := func(p proto.Message) { + for _, s := range p.(*pb.Batch).Chunks { + pool.Put(s) + } } - return nil -} -func (m *dbMeta) execETxn(ctx Context, txn *eTxn, f func(Context, *eTxn) error) error { - if txn.opt.coNum > 1 { - // only use same txn when coNum == 1 for sql - txn.opt.notUsed = true - return f(ctx, txn) + var maxId uint64 + err := m.roTxn(ctx, func(s *xorm.Session) error { + var row chunk + ok, err := s.Select("MAX(id) as id").Get(&row) + if ok { + maxId = uint64(row.Id) + } + return err + }) + if err != nil { + return err } - ctx.WithValue(txMaxRetryKey{}, txn.opt.maxRetry) - return m.roTxn(ctx, func(sess *xorm.Session) error { - txn.obj = sess - return f(ctx, txn) + + return sqlQueryBatch(ctx, opt, maxId, func(ctx context.Context, start, end uint64) (int, error) { + var rows []chunk + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Where("id >= ? 
AND id < ?", start, end).Find(&rows) + }); err != nil { + return 0, err + } + chunks := make([]*pb.Chunk, 0, len(rows)) + for _, c := range rows { + pc := pool.Get().(*pb.Chunk) + pc.Inode = uint64(c.Inode) + pc.Index = c.Indx + pc.Slices = c.Slices + chunks = append(chunks, pc) + } + return len(rows), dumpResult(ctx, ch, &dumpedResult{&pb.Batch{Chunks: chunks}, release}) }) } -func (m *dbMeta) execStmt(ctx context.Context, txn *eTxn, f func(*xorm.Session) error) error { - if txn.opt.notUsed { - return m.roTxn(ctx, func(s *xorm.Session) error { - return f(s) - }) +func (m *dbMeta) dumpEdges(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + pool := sync.Pool{New: func() interface{} { return &pb.Edge{} }} + release := func(p proto.Message) { + for _, s := range p.(*pb.Batch).Edges { + pool.Put(s) + } } - var err error - cnt := 0 - for cnt < txn.opt.maxStmtRetry { - err = f(txn.obj.(*xorm.Session)) - if err == nil || !m.shouldRetry(err) { - break + var maxId uint64 + err := m.roTxn(ctx, func(s *xorm.Session) error { + var row edge + ok, err := s.Select("MAX(id) as id").Get(&row) + if ok { + maxId = uint64(row.Id) } - cnt++ - time.Sleep(time.Duration(cnt) * time.Microsecond) + return err + }) + if err != nil { + return err } - return err -} -func getSQLCounterFields(c *pb.Counters) map[string]*int64 { - return map[string]*int64{ - usedSpace: &c.UsedSpace, - totalInodes: &c.UsedInodes, - "nextInode": &c.NextInode, - "nextChunk": &c.NextChunk, - "nextSession": &c.NextSession, - "nextTrash": &c.NextTrash, + var mu sync.Mutex + dumpParents := make(map[uint64][]uint64) + err = sqlQueryBatch(ctx, opt, maxId, func(ctx context.Context, start, end uint64) (int, error) { + var rows []edge + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Where("id >= ? 
AND id < ?", start, end).Find(&rows) + }); err != nil { + return 0, err + } + edges := make([]*pb.Edge, 0, len(rows)) + for _, e := range rows { + pe := pool.Get().(*pb.Edge) + pe.Parent = uint64(e.Parent) + pe.Inode = uint64(e.Inode) + pe.Name = e.Name + pe.Type = uint32(e.Type) + edges = append(edges, pe) + mu.Lock() + dumpParents[uint64(e.Inode)] = append(dumpParents[uint64(e.Inode)], uint64(e.Parent)) + mu.Unlock() + } + return len(rows), dumpResult(ctx, ch, &dumpedResult{&pb.Batch{Edges: edges}, release}) + }) + if err != nil { + return err } -} -type sqlCounterDS struct { - dumpedSeg + parents := make([]*pb.Parent, 0, sqlDumpBatchSize) + st := make(map[uint64]int64) + for inode, ps := range dumpParents { + if len(ps) > 1 { + for k := range st { + delete(st, k) + } + for _, p := range ps { + st[p] = st[p] + 1 + } + for parent, cnt := range st { + parents = append(parents, &pb.Parent{Inode: inode, Parent: parent, Cnt: cnt}) + } + } + if len(parents) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Parents: parents}}); err != nil { + return err + } + parents = parents[:0] + } + } + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Parents: parents}}) } -func (s *sqlCounterDS) dump(ctx Context, ch chan *dumpedResult) error { - meta := s.meta.(*dbMeta) - var rows []counter - if err := meta.execStmt(ctx, s.txn, func(s *xorm.Session) error { +func (m *dbMeta) dumpSymlinks(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + var rows []symlink + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - counters := &pb.Counters{} - fieldMap := getSQLCounterFields(counters) - for _, row := range rows { - if fieldPtr, ok := fieldMap[row.Name]; ok { - *fieldPtr = row.Value + + symlinks := make([]*pb.Symlink, 0, min(len(rows), sqlDumpBatchSize)) + for _, r := range rows { + symlinks = append(symlinks, &pb.Symlink{Inode: uint64(r.Inode), Target: r.Target}) + if len(symlinks) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Symlinks: symlinks}}); err != nil { + return err + } + symlinks = symlinks[:0] } } - if err := dumpResult(ctx, ch, &dumpedResult{s, counters}); err != nil { - return err - } - logger.Debugf("dump %s result %+v", s, counters) - return nil + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Symlinks: symlinks}}) } -type sqlSustainedDS struct { - dumpedSeg +func (m *dbMeta) dumpCounters(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + var rows []counter + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Find(&rows) + }); err != nil { + return err + } + var counters = make([]*pb.Counter, 0, len(rows)) + for _, row := range rows { + counters = append(counters, &pb.Counter{Key: row.Name, Value: row.Value}) + } + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Counters: counters}}) } -func (s *sqlSustainedDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpSustained(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []sustained - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { @@ -210,173 +313,109 @@ func (s *sqlSustainedDS) dump(ctx Context, ch chan *dumpedResult) error { for _, row := range rows { ss[row.Sid] = append(ss[row.Sid], uint64(row.Inode)) } - - pss := &pb.SustainedList{ - List: 
make([]*pb.Sustained, 0, len(ss)), - } + sustained := make([]*pb.Sustained, 0, len(rows)) for k, v := range ss { - pss.List = append(pss.List, &pb.Sustained{Sid: k, Inodes: v}) - } - - if err := dumpResult(ctx, ch, &dumpedResult{s, pss}); err != nil { - return err + sustained = append(sustained, &pb.Sustained{Sid: k, Inodes: v}) } - logger.Debugf("dump %s num %d", s, len(ss)) - return nil + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Sustained: sustained}}) } -type sqlDelFileDS struct { - dumpedSeg -} - -func (s *sqlDelFileDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpDelFiles(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []delfile - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - delFiles := &pb.DelFileList{List: make([]*pb.DelFile, 0, len(rows))} + delFiles := make([]*pb.DelFile, 0, min(sqlDumpBatchSize, len(rows))) for _, row := range rows { - delFiles.List = append(delFiles.List, &pb.DelFile{Inode: uint64(row.Inode), Length: row.Length, Expire: row.Expire}) - } - if err := dumpResult(ctx, ch, &dumpedResult{s, delFiles}); err != nil { - return err - } - logger.Debugf("dump %s num %d", s, len(delFiles.List)) - return nil -} - -type sqlSliceRefDS struct { - dumpedBatchSeg -} - -func (s *sqlSliceRefDS) dump(ctx Context, ch chan *dumpedResult) error { - eg, _ := errgroup.WithContext(ctx) - eg.SetLimit(s.opt.CoNum) - - taskFinished := false - psrs := &pb.SliceRefList{List: make([]*pb.SliceRef, 0, 1024)} - for start := 0; !taskFinished; start += sqlDumpBatchSize { - nStart := start - eg.Go(func() error { - var rows []sliceRef - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { - rows = rows[:0] - return s.Where("refs != 1").Limit(sqlDumpBatchSize, nStart).Find(&rows) // skip default refs - }); err != nil || len(rows) == 0 { - taskFinished = true + delFiles = append(delFiles, &pb.DelFile{Inode: uint64(row.Inode), Length: row.Length, Expire: row.Expire}) + if len(delFiles) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Delfiles: delFiles}}); err != nil { return err } - var psr *pb.SliceRef - for _, sr := range rows { - psr = s.pools[0].Get().(*pb.SliceRef) - psr.Id = sr.Id - psr.Size = sr.Size - psr.Refs = int64(sr.Refs) - psrs.List = append(psrs.List, psr) - } - return nil - }) - } - if err := eg.Wait(); err != nil { - logger.Errorf("query %s err: %v", s, err) - return err - } - if err := dumpResult(ctx, ch, &dumpedResult{s, psrs}); err != nil { - return err + delFiles = delFiles[:0] + } } - logger.Debugf("dump %s num %d", s, len(psrs.List)) - return nil + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Delfiles: delFiles}}) } -func (s *sqlSliceRefDS) release(msg proto.Message) { - psrs := msg.(*pb.SliceRefList) - for _, psr := range psrs.List { - s.pools[0].Put(psr) +func (m *dbMeta) dumpSliceRef(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { + var rows []sliceRef + if err := m.roTxn(ctx, func(s *xorm.Session) error { + rows = rows[:0] + return s.Where("refs != 1").Find(&rows) // skip default refs + }); err != nil { + return err } - psrs.List = nil -} - -type sqlAclDS struct { - dumpedSeg + sliceRefs := make([]*pb.SliceRef, 0, min(sqlDumpBatchSize, len(rows))) + for _, sr := range rows { + sliceRefs = append(sliceRefs, &pb.SliceRef{Id: sr.Id, Size: sr.Size, Refs: int64(sr.Refs)}) + if 
len(sliceRefs) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{SliceRefs: sliceRefs}}); err != nil { + return err + } + sliceRefs = sliceRefs[:0] + } + } + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{SliceRefs: sliceRefs}}) } -func (s *sqlAclDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpACL(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []acl - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - acls := &pb.AclList{List: make([]*pb.Acl, 0, len(rows))} + acls := make([]*pb.Acl, 0, len(rows)) for _, row := range rows { - acls.List = append(acls.List, &pb.Acl{ + acls = append(acls, &pb.Acl{ Id: row.Id, Data: row.toRule().Encode(), }) } - if err := dumpResult(ctx, ch, &dumpedResult{s, acls}); err != nil { - return err - } - logger.Debugf("dump %s num %d", s, len(acls.List)) - return nil -} - -type sqlXattrDS struct { - dumpedSeg + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Acls: acls}}) } -func (s *sqlXattrDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpXattr(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []xattr - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - - if len(rows) == 0 { - return nil - } - - pxs := &pb.XattrList{ - List: make([]*pb.Xattr, 0, len(rows)), - } + xattrs := make([]*pb.Xattr, 0, min(sqlDumpBatchSize, len(rows))) for _, x := range rows { - pxs.List = append(pxs.List, &pb.Xattr{ + xattrs = append(xattrs, &pb.Xattr{ Inode: uint64(x.Inode), Name: x.Name, Value: x.Value, }) + if len(xattrs) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Xattrs: xattrs}}); err != nil { + return err + } + xattrs = xattrs[:0] + } } - - logger.Debugf("dump %s num %d", s, len(pxs.List)) - return dumpResult(ctx, ch, &dumpedResult{s, pxs}) + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Xattrs: xattrs}}) } -type sqlQuotaDS struct { - dumpedSeg -} - -func (s *sqlQuotaDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpQuota(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []dirQuota - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - if len(rows) == 0 { - return nil - } - pqs := &pb.QuotaList{ - List: make([]*pb.Quota, 0, len(rows)), - } + quotas := make([]*pb.Quota, 0, len(rows)) for _, q := range rows { - pqs.List = append(pqs.List, &pb.Quota{ + quotas = append(quotas, &pb.Quota{ Inode: uint64(q.Inode), MaxSpace: q.MaxSpace, MaxInodes: q.MaxInodes, @@ -384,404 +423,197 @@ func (s *sqlQuotaDS) dump(ctx Context, ch chan *dumpedResult) error { UsedInodes: q.UsedInodes, }) } - logger.Debugf("dump %s num %d", s, len(pqs.List)) - return dumpResult(ctx, ch, &dumpedResult{s, pqs}) + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Quotas: quotas}}) } -type sqlStatDS struct { - dumpedSeg -} - -func (s *sqlStatDS) dump(ctx Context, ch chan *dumpedResult) error { +func (m *dbMeta) dumpDirStat(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error { var rows []dirStats - if err := 
s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { + if err := m.roTxn(ctx, func(s *xorm.Session) error { rows = rows[:0] return s.Find(&rows) }); err != nil { return err } - if len(rows) == 0 { - return nil - } - pss := &pb.StatList{ - List: make([]*pb.Stat, 0, len(rows)), - } + dirStats := make([]*pb.Stat, 0, min(sqlDumpBatchSize, len(rows))) for _, st := range rows { - pss.List = append(pss.List, &pb.Stat{ + dirStats = append(dirStats, &pb.Stat{ Inode: uint64(st.Inode), DataLength: st.DataLength, UsedInodes: st.UsedInodes, UsedSpace: st.UsedSpace, }) - } - logger.Debugf("dump %s num %d", s, len(pss.List)) - return dumpResult(ctx, ch, &dumpedResult{s, pss}) -} - -func sqlQueryBatch(ctx Context, s iDumpedSeg, opt *DumpOption, ch chan *dumpedResult, query func(ctx context.Context, limit, start int, sum *int64) (proto.Message, error)) error { - eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(opt.CoNum) - - taskFinished := false - sum := int64(0) - for start := 0; !taskFinished; start += sqlDumpBatchSize { - nStart := start - eg.Go(func() error { - msg, err := query(egCtx, sqlDumpBatchSize, nStart, &sum) - if err != nil || msg == nil { - taskFinished = true + if len(dirStats) >= sqlDumpBatchSize { + if err := dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Dirstats: dirStats}}); err != nil { return err } - return dumpResult(egCtx, ch, &dumpedResult{s, msg}) - }) - } - if err := eg.Wait(); err != nil { - logger.Errorf("query %s err: %v", s, err) - return err + dirStats = dirStats[:0] + } } - logger.Debugf("dump %s num %d", s, sum) - return nil + return dumpResult(ctx, ch, &dumpedResult{msg: &pb.Batch{Dirstats: dirStats}}) } -type sqlNodeDBS struct { - dumpedBatchSeg +func (m *dbMeta) load(ctx Context, typ int, opt *LoadOption, val proto.Message) error { + switch typ { + case segTypeFormat: + return m.loadFormat(ctx, val) + case segTypeCounter: + return m.loadCounters(ctx, val) + case segTypeNode: + return m.loadNodes(ctx, val) + case segTypeChunk: + return m.loadChunks(ctx, val) + case segTypeEdge: + return m.loadEdges(ctx, val) + case segTypeSymlink: + return m.loadSymlinks(ctx, val) + case segTypeSustained: + return m.loadSustained(ctx, val) + case segTypeDelFile: + return m.loadDelFiles(ctx, val) + case segTypeSliceRef: + return m.loadSliceRefs(ctx, val) + case segTypeAcl: + return m.loadAcl(ctx, val) + case segTypeXattr: + return m.loadXattrs(ctx, val) + case segTypeQuota: + return m.loadQuota(ctx, val) + case segTypeStat: + return m.loadDirStats(ctx, val) + case segTypeParent, segTypeMix: + return nil // skip + default: + logger.Warnf("skip segment type %d", typ) + return nil + } } -func (s *sqlNodeDBS) dump(ctx Context, ch chan *dumpedResult) error { - return sqlQueryBatch(ctx, s, s.opt, ch, s.doQuery) +func (m *dbMeta) loadFormat(ctx Context, msg proto.Message) error { + return m.insertRows([]interface{}{ + &setting{ + Name: "format", + Value: string(msg.(*pb.Format).Data), + }, + }) } -func (s *sqlNodeDBS) doQuery(ctx context.Context, limit, start int, sum *int64) (proto.Message, error) { - var rows []node - m := s.meta.(*dbMeta) - if err := m.execStmt(ctx, s.txn, func(s *xorm.Session) error { - rows = rows[:0] - return s.Limit(limit, start).Find(&rows) - }); err != nil { - return nil, err - } - if len(rows) == 0 { - return nil, nil - } - pns := &pb.NodeList{ - List: make([]*pb.Node, 0, len(rows)), - } - var pn *pb.Node - attr := &Attr{} - for _, n := range rows { - pn = s.pools[0].Get().(*pb.Node) - pn.Inode = uint64(n.Inode) - m.parseAttr(&n, attr) - pn.Data = 
m.marshal(attr) - pns.List = append(pns.List, pn) +func (m *dbMeta) loadCounters(ctx Context, msg proto.Message) error { + var rows []interface{} + for _, c := range msg.(*pb.Batch).Counters { + rows = append(rows, counter{Name: c.Key, Value: c.Value}) } - atomic.AddInt64(sum, int64(len(pns.List))) - return pns, nil + return m.insertRows(rows) } -func (s *sqlNodeDBS) release(msg proto.Message) { - pns := msg.(*pb.NodeList) - for _, node := range pns.List { - s.pools[0].Put(node) +func (m *dbMeta) loadNodes(ctx Context, msg proto.Message) error { + nodes := msg.(*pb.Batch).Nodes + b := m.getBase() + rows := make([]interface{}, 0, len(nodes)) + ns := make([]node, len(nodes)) + attr := &Attr{} + for i, n := range nodes { + pn := &ns[i] + pn.Inode = Ino(n.Inode) + b.parseAttr(n.Data, attr) + m.parseNode(attr, pn) + rows = append(rows, pn) } - pns.List = nil -} - -type sqlChunkDBS struct { - dumpedBatchSeg + return m.insertRows(rows) } -func (s *sqlChunkDBS) dump(ctx Context, ch chan *dumpedResult) error { - return sqlQueryBatch(ctx, s, s.opt, ch, s.doQuery) -} - -func (s *sqlChunkDBS) doQuery(ctx context.Context, limit, start int, sum *int64) (proto.Message, error) { - var rows []chunk - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { - rows = rows[:0] - return s.Limit(limit, start).Find(&rows) - }); err != nil { - return nil, err - } - if len(rows) == 0 { - return nil, nil - } - pcs := &pb.ChunkList{ - List: make([]*pb.Chunk, 0, len(rows)), - } - var pc *pb.Chunk - for _, c := range rows { - pc = s.pools[0].Get().(*pb.Chunk) - pc.Inode = uint64(c.Inode) - pc.Index = c.Indx +func (m *dbMeta) loadChunks(ctx Context, msg proto.Message) error { + chunks := msg.(*pb.Batch).Chunks + rows := make([]interface{}, 0, len(chunks)) + cs := make([]chunk, len(chunks)) + for i, c := range chunks { + pc := &cs[i] + pc.Inode = Ino(c.Inode) + pc.Indx = c.Index pc.Slices = c.Slices - pcs.List = append(pcs.List, pc) - } - atomic.AddInt64(sum, int64(len(pcs.List))) - return pcs, nil -} - -func (s *sqlChunkDBS) release(msg proto.Message) { - pcs := msg.(*pb.ChunkList) - for _, pc := range pcs.List { - s.pools[0].Put(pc) + rows = append(rows, pc) } - pcs.List = nil -} - -type sqlEdgeDBS struct { - dumpedBatchSeg - lock sync.Mutex -} - -func (s *sqlEdgeDBS) dump(ctx Context, ch chan *dumpedResult) error { - ctx.WithValue("parents", make(map[uint64][]uint64)) - return sqlQueryBatch(ctx, s, s.opt, ch, s.doQuery) + return m.insertRows(rows) } -func (s *sqlEdgeDBS) doQuery(ctx context.Context, limit, start int, sum *int64) (proto.Message, error) { - // TODO: optimize parents - s.lock.Lock() - parents := ctx.Value("parents").(map[uint64][]uint64) - s.lock.Unlock() - - var rows []edge - if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error { - rows = rows[:0] - return s.Limit(limit, start).Find(&rows) - }); err != nil { - return nil, err - } - if len(rows) == 0 { - return nil, nil - } - pes := &pb.EdgeList{ - List: make([]*pb.Edge, 0, len(rows)), - } - var pe *pb.Edge - for _, e := range rows { - pe = s.pools[0].Get().(*pb.Edge) - pe.Parent = uint64(e.Parent) - pe.Inode = uint64(e.Inode) +func (m *dbMeta) loadEdges(ctx Context, msg proto.Message) error { + edges := msg.(*pb.Batch).Edges + rows := make([]interface{}, 0, len(edges)) + es := make([]edge, len(edges)) + for i, e := range edges { + pe := &es[i] + pe.Parent = Ino(e.Parent) + pe.Inode = Ino(e.Inode) pe.Name = e.Name - pe.Type = uint32(e.Type) - - s.lock.Lock() - parents[uint64(e.Inode)] = 
append(parents[uint64(e.Inode)], uint64(e.Parent))
-		s.lock.Unlock()
-		pes.List = append(pes.List, pe)
-	}
-	atomic.AddInt64(sum, int64(len(pes.List)))
-	return pes, nil
-}
-
-func (s *sqlEdgeDBS) release(msg proto.Message) {
-	pes := msg.(*pb.EdgeList)
-	for _, pe := range pes.List {
-		s.pools[0].Put(pe)
-	}
-	pes.List = nil
-}
-
-type sqlParentDS struct {
-	dumpedSeg
-}
-
-func (s *sqlParentDS) dump(ctx Context, ch chan *dumpedResult) error {
-	val := ctx.Value("parents")
-	if val == nil {
-		return nil
-	}
-
-	parents := val.(map[uint64][]uint64)
-	pls := &pb.ParentList{
-		List: make([]*pb.Parent, 0, sqlDumpBatchSize),
-	}
-	st := make(map[uint64]int64)
-	for inode, ps := range parents {
-		if len(ps) > 1 {
-			for k := range st {
-				delete(st, k)
-			}
-			for _, p := range ps {
-				st[p] = st[p] + 1
-			}
-			for parent, cnt := range st {
-				pls.List = append(pls.List, &pb.Parent{Inode: inode, Parent: parent, Cnt: cnt})
-			}
-		}
-		if len(pls.List) >= sqlDumpBatchSize {
-			if err := dumpResult(ctx, ch, &dumpedResult{s, pls}); err != nil {
-				return err
-			}
-			pls = &pb.ParentList{
-				List: make([]*pb.Parent, 0, sqlDumpBatchSize),
-			}
-		}
-	}
-
-	if len(pls.List) > 0 {
-		if err := dumpResult(ctx, ch, &dumpedResult{s, pls}); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-type sqlSymlinkDBS struct {
-	dumpedBatchSeg
-}
-
-func (s *sqlSymlinkDBS) dump(ctx Context, ch chan *dumpedResult) error {
-	return sqlQueryBatch(ctx, s, s.opt, ch, s.doQuery)
-}
-
-func (s *sqlSymlinkDBS) doQuery(ctx context.Context, limit, start int, sum *int64) (proto.Message, error) {
-	var rows []symlink
-	if err := s.meta.(*dbMeta).execStmt(ctx, s.txn, func(s *xorm.Session) error {
-		rows = rows[:0]
-		return s.Limit(limit, start).Find(&rows)
-	}); err != nil {
-		return nil, err
-	}
-	if len(rows) == 0 {
-		return nil, nil
-	}
-	pss := &pb.SymlinkList{
-		List: make([]*pb.Symlink, 0, len(rows)),
-	}
-	var ps *pb.Symlink
-	for _, sl := range rows {
-		ps = s.pools[0].Get().(*pb.Symlink)
-		ps.Inode = uint64(sl.Inode)
-		ps.Target = sl.Target
-		pss.List = append(pss.List, ps)
-	}
-	atomic.AddInt64(sum, int64(len(pss.List)))
-	return pss, nil
-}
-
-func (s *sqlSymlinkDBS) release(msg proto.Message) {
-	pss := msg.(*pb.SymlinkList)
-	for _, ps := range pss.List {
-		s.pools[0].Put(ps)
+		pe.Type = uint8(e.Type)
+		rows = append(rows, pe)
 	}
-	pss.List = nil
-}
-
-type sqlFormatLS struct {
-	loadedSeg
-}
-
-func (s *sqlFormatLS) load(ctx Context, msg proto.Message) error {
-	return s.meta.(*dbMeta).insertRows([]interface{}{
-		&setting{
-			Name:  "format",
-			Value: string(msg.(*pb.Format).Data),
-		},
-	})
+	return m.insertRows(rows)
 }
 
-type sqlCounterLS struct {
-	loadedSeg
-}
-
-func (s *sqlCounterLS) load(ctx Context, msg proto.Message) error {
-	counters := msg.(*pb.Counters)
-	fields := getSQLCounterFields(counters)
-
-	var rows []interface{}
-	for name, field := range fields {
-		rows = append(rows, counter{Name: name, Value: *field})
+func (m *dbMeta) loadSymlinks(ctx Context, msg proto.Message) error {
+	symlinks := msg.(*pb.Batch).Symlinks
+	rows := make([]interface{}, 0, len(symlinks))
+	for _, sl := range symlinks {
+		rows = append(rows, &symlink{Ino(sl.Inode), sl.Target})
 	}
-	logger.Debugf("insert counters %+v", rows)
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlSustainedLS struct {
-	loadedSeg
+	return m.insertRows(rows)
 }
 
-func (s *sqlSustainedLS) load(ctx Context, msg proto.Message) error {
-	sustaineds := msg.(*pb.SustainedList)
-	rows := make([]interface{}, 0, len(sustaineds.List))
-	for _, s := range sustaineds.List {
+func (m *dbMeta) loadSustained(ctx Context, msg proto.Message) error {
+	sustaineds := msg.(*pb.Batch).Sustained
+	rows := make([]interface{}, 0, len(sustaineds))
+	for _, s := range sustaineds {
 		for _, inode := range s.Inodes {
 			rows = append(rows, sustained{Sid: s.Sid, Inode: Ino(inode)})
 		}
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlDelFileLS struct {
-	loadedSeg
+	return m.insertRows(rows)
 }
 
-func (s *sqlDelFileLS) load(ctx Context, msg proto.Message) error {
-	delfiles := msg.(*pb.DelFileList)
-	rows := make([]interface{}, 0, len(delfiles.List))
-	for _, f := range delfiles.List {
+func (m *dbMeta) loadDelFiles(ctx Context, msg proto.Message) error {
+	delfiles := msg.(*pb.Batch).Delfiles
+	rows := make([]interface{}, 0, len(delfiles))
+	for _, f := range delfiles {
 		rows = append(rows, &delfile{Inode: Ino(f.Inode), Length: f.Length, Expire: f.Expire})
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlSliceRefLS struct {
-	loadedSeg
+	return m.insertRows(rows)
 }
 
-func (s *sqlSliceRefLS) load(ctx Context, msg proto.Message) error {
-	srs := msg.(*pb.SliceRefList)
-	rows := make([]interface{}, 0, len(srs.List))
-	for _, sr := range srs.List {
+func (m *dbMeta) loadSliceRefs(ctx Context, msg proto.Message) error {
+	srs := msg.(*pb.Batch).SliceRefs
+	rows := make([]interface{}, 0, len(srs))
+	for _, sr := range srs {
 		rows = append(rows, &sliceRef{Id: sr.Id, Size: sr.Size, Refs: int(sr.Refs)})
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
+	return m.insertRows(rows)
 }
 
-type sqlAclLS struct {
-	loadedSeg
-}
-
-func (s *sqlAclLS) load(ctx Context, msg proto.Message) error {
-	acls := msg.(*pb.AclList)
-	rows := make([]interface{}, 0, len(acls.List))
-	for _, pa := range acls.List {
+func (m *dbMeta) loadAcl(ctx Context, msg proto.Message) error {
+	acls := msg.(*pb.Batch).Acls
+	rows := make([]interface{}, 0, len(acls))
+	for _, pa := range acls {
 		rule := &aclAPI.Rule{}
 		rule.Decode(pa.Data)
 		acl := newSQLAcl(rule)
 		acl.Id = pa.Id
 		rows = append(rows, acl)
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlXattrLS struct {
-	loadedSeg
+	return m.insertRows(rows)
 }
 
-func (s *sqlXattrLS) load(ctx Context, msg proto.Message) error {
-	xattrs := msg.(*pb.XattrList)
-	rows := make([]interface{}, 0, len(xattrs.List))
-	for _, x := range xattrs.List {
+func (m *dbMeta) loadXattrs(ctx Context, msg proto.Message) error {
+	xattrs := msg.(*pb.Batch).Xattrs
+	rows := make([]interface{}, 0, len(xattrs))
+	for _, x := range xattrs {
 		rows = append(rows, &xattr{Inode: Ino(x.Inode), Name: x.Name, Value: x.Value})
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlQuotaLS struct {
-	loadedSeg
+	return m.insertRows(rows)
 }
 
-func (s *sqlQuotaLS) load(ctx Context, msg proto.Message) error {
-	quotas := msg.(*pb.QuotaList)
-	rows := make([]interface{}, 0, len(quotas.List))
-	for _, q := range quotas.List {
+func (m *dbMeta) loadQuota(ctx Context, msg proto.Message) error {
+	quotas := msg.(*pb.Batch).Quotas
+	rows := make([]interface{}, 0, len(quotas))
+	for _, q := range quotas {
 		rows = append(rows, &dirQuota{
 			Inode:      Ino(q.Inode),
 			MaxSpace:   q.MaxSpace,
@@ -790,18 +622,13 @@ func (s *sqlQuotaLS) load(ctx Context, msg proto.Message) error {
 			UsedInodes: q.UsedInodes,
 		})
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
+	return m.insertRows(rows)
 }
 
-type sqlStatLS struct {
-	loadedSeg
-}
-
-func (s *sqlStatLS) load(ctx Context, msg proto.Message) error {
-	stats := msg.(*pb.StatList)
-	rows := make([]interface{}, 0, len(stats.List))
-	for _, st := range stats.List {
+func (m *dbMeta) loadDirStats(ctx Context, msg proto.Message) error {
+	stats := msg.(*pb.Batch).Dirstats
+	rows := make([]interface{}, 0, len(stats))
+	for _, st := range stats {
 		rows = append(rows, &dirStats{
 			Inode:      Ino(st.Inode),
 			DataLength: st.DataLength,
@@ -809,138 +636,21 @@ func (s *sqlStatLS) load(ctx Context, msg proto.Message) error {
 			UsedSpace:  st.UsedSpace,
 		})
 	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return s.meta.(*dbMeta).insertRows(rows)
-}
-
-type sqlNodeLS struct {
-	loadedSeg
-	pools []*sync.Pool
-}
-
-func (s *sqlNodeLS) load(ctx Context, msg proto.Message) error {
-	nodes := msg.(*pb.NodeList)
-	m := s.meta.(*dbMeta)
-	b := m.getBase()
-	rows := make([]interface{}, 0, len(nodes.List))
-	var pn *node
-	attr := &Attr{}
-	for _, n := range nodes.List {
-		pn = s.pools[0].Get().(*node)
-		pn.Inode = Ino(n.Inode)
-		attr.Parent, attr.AccessACL, attr.DefaultACL = 0, 0, 0
-		b.parseAttr(n.Data, attr)
-		m.parseNode(attr, pn)
-		rows = append(rows, pn)
-	}
-	err := s.meta.(*dbMeta).insertRows(rows)
-	for _, n := range rows {
-		s.pools[0].Put(n)
-	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return err
-}
-
-type sqlChunkLS struct {
-	loadedSeg
-	pools []*sync.Pool
-}
-
-func (s *sqlChunkLS) load(ctx Context, msg proto.Message) error {
-	chunks := msg.(*pb.ChunkList)
-	rows := make([]interface{}, 0, len(chunks.List))
-	var pc *chunk
-	for _, c := range chunks.List {
-		pc = s.pools[0].Get().(*chunk)
-		pc.Id = 0
-		pc.Inode = Ino(c.Inode)
-		pc.Indx = c.Index
-		pc.Slices = c.Slices
-		rows = append(rows, pc)
-	}
-	err := s.meta.(*dbMeta).insertRows(rows)
-
-	for _, chk := range rows {
-		s.pools[0].Put(chk)
-	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return err
-}
-
-type sqlEdgeLS struct {
-	loadedSeg
-	pools []*sync.Pool
-}
-
-func (s *sqlEdgeLS) load(ctx Context, msg proto.Message) error {
-	edges := msg.(*pb.EdgeList)
-	rows := make([]interface{}, 0, len(edges.List))
-	var pe *edge
-	for _, e := range edges.List {
-		pe = s.pools[0].Get().(*edge)
-		pe.Id = 0
-		pe.Parent = Ino(e.Parent)
-		pe.Inode = Ino(e.Inode)
-		pe.Name = e.Name
-		pe.Type = uint8(e.Type)
-		rows = append(rows, pe)
-	}
-
-	err := s.meta.(*dbMeta).insertRows(rows)
-	for _, e := range rows {
-		s.pools[0].Put(e)
-	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return err
-}
-
-type sqlParentLS struct {
-	loadedSeg
-}
-
-func (s *sqlParentLS) load(ctx Context, msg proto.Message) error {
-	return nil // No need for SQL, skip.
-}
-
-type sqlSymlinkLS struct {
-	loadedSeg
-	pools []*sync.Pool
-}
-
-func (s *sqlSymlinkLS) load(ctx Context, msg proto.Message) error {
-	symlinks := msg.(*pb.SymlinkList)
-	rows := make([]interface{}, 0, len(symlinks.List))
-	var ps *symlink
-	for _, sl := range symlinks.List {
-		ps = s.pools[0].Get().(*symlink)
-		ps.Inode = Ino(sl.Inode)
-		ps.Target = sl.Target
-		rows = append(rows, ps)
-	}
-
-	err := s.meta.(*dbMeta).insertRows(rows)
-	for _, sl := range rows {
-		s.pools[0].Put(sl)
-	}
-	logger.Debugf("insert %s num %d", s, len(rows))
-	return err
+	return m.insertRows(rows)
 }
 
 func (m *dbMeta) insertRows(beans []interface{}) error {
-	insert := func(rows []interface{}) error {
-		return m.txn(func(s *xorm.Session) error {
-			n, err := s.Insert(rows...)
-			if err == nil && int(n) != len(rows) {
+	batch := m.getTxnBatchNum()
+	for len(beans) > 0 {
+		bs := utils.Min(batch, len(beans))
+		err := m.txn(func(s *xorm.Session) error {
+			n, err := s.Insert(beans[:bs]...)
+			if err == nil && int(n) != bs {
 				err = fmt.Errorf("only %d records inserted", n)
 			}
 			return err
 		})
-	}
-
-	batch := m.getTxnBatchNum()
-	for len(beans) > 0 {
-		bs := utils.Min(batch, len(beans))
-		if err := insert(beans[:bs]); err != nil {
+		if err != nil {
 			logger.Errorf("Write %d beans: %s", bs, err)
 			return err
 		}
diff --git a/pkg/meta/tkv.go b/pkg/meta/tkv.go
index d074e8fa2cd5..f874a6a23f51 100644
--- a/pkg/meta/tkv.go
+++ b/pkg/meta/tkv.go
@@ -19,7 +19,6 @@ package meta
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -802,10 +801,6 @@ func (m *kvMeta) shouldRetry(err error) bool {
 	return m.client.shouldRetry(err)
 }
 
-func (m *kvMeta) roTxn(ctx context.Context, f func(*kvTxn) error) error {
-	return nil
-}
-
 func (m *kvMeta) txn(f func(tx *kvTxn) error, inodes ...Ino) error {
 	if m.conf.ReadOnly {
 		return syscall.EROFS
diff --git a/pkg/meta/tkv_bak.go b/pkg/meta/tkv_bak.go
index 847b299f03f8..1bdea294dc86 100644
--- a/pkg/meta/tkv_bak.go
+++ b/pkg/meta/tkv_bak.go
@@ -16,20 +16,14 @@
 
 package meta
 
-func (m *kvMeta) buildDumpedSeg(typ int, opt *DumpOption, txn *eTxn) iDumpedSeg {
-	return nil
-}
+import "google.golang.org/protobuf/proto"
 
-func (m *kvMeta) buildLoadedSeg(typ int, opt *LoadOption) iLoadedSeg {
+func (m *kvMeta) dump(ctx Context, opt *DumpOption, ch chan<- *dumpedResult) error {
 	return nil
 }
 
-func (m *kvMeta) execETxn(ctx Context, txn *eTxn, f func(Context, *eTxn) error) error {
-	ctx.WithValue(txMaxRetryKey{}, txn.opt.maxRetry)
-	return m.roTxn(ctx, func(tx *kvTxn) error {
-		txn.obj = tx
-		return f(ctx, txn)
-	})
+func (m *kvMeta) load(ctx Context, typ int, opt *LoadOption, val proto.Message) error {
+	return nil
 }
 
 func (m *kvMeta) prepareLoad(ctx Context, opt *LoadOption) error {