From fc897b481fae32be0b3a6e61e64b221a0df89cc5 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 14 Jun 2021 15:19:27 -0600 Subject: [PATCH 01/95] Group replica wal segments by index This commit changes the replica path format to group segments within a single index in the same directory. This is to eventually add the ability to seek to a record on file-based systems without having to iterate over the records. The DB shadow WAL will also be changed to this same format to support live replicas. --- abs/replica_client.go | 77 +++++++-------- db.go | 40 ++++++-- file/replica_client.go | 135 +++++++++++++++++++++----- file/replica_client_test.go | 2 +- gcs/replica_client.go | 78 +++++++-------- internal/internal.go | 30 ++++++ internal/internal_test.go | 61 ++++++++++++ litestream.go | 134 +++----------------------- litestream_test.go | 88 ----------------- replica_client_test.go | 16 +-- s3/replica_client.go | 78 +++++++-------- sftp/replica_client.go | 187 ++++++++++++++++++++++++++---------- 12 files changed, 502 insertions(+), 424 deletions(-) create mode 100644 internal/internal_test.go diff --git a/abs/replica_client.go b/abs/replica_client.go index 4d5e00ef..551f638a 100644 --- a/abs/replica_client.go +++ b/abs/replica_client.go @@ -102,7 +102,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{ - Prefix: litestream.GenerationsPath(c.Path) + "/", + Prefix: path.Join(c.Path, "generations") + "/", }) if err != nil { return nil, err @@ -125,18 +125,17 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } + prefix := path.Join(c.Path, "generations", generation) + "/" var marker azblob.Marker for marker.NotDone() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix}) if err != nil { return err } @@ -171,12 +170,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -206,12 +204,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { 
return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") blobURL := c.containerURL.NewBlobURL(key) resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) @@ -231,12 +228,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() @@ -261,12 +257,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -296,12 +291,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") blobURL := c.containerURL.NewBlobURL(key) resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) @@ -324,11 +318,12 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } for _, pos := range a { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") + internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() blobURL := c.containerURL.NewBlobURL(key) @@ -372,24 +367,24 @@ func newSnapshotIterator(ctx context.Context, generation string, client *Replica func 
(itr *snapshotIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine snapshots path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + prefix := path.Join(itr.client.Path, "generations", itr.generation) + "/" + var marker azblob.Marker for marker.NotDone() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix}) if err != nil { return err } marker = resp.NextMarker for _, item := range resp.Segment.BlobItems { - key := path.Base(item.Name) - index, err := litestream.ParseSnapshotPath(key) + index, err := internal.ParseSnapshotPath(path.Base(item.Name)) if err != nil { continue } @@ -478,24 +473,24 @@ func newWALSegmentIterator(ctx context.Context, generation string, client *Repli func (itr *walSegmentIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.WALPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine wal path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal") var marker azblob.Marker for marker.NotDone() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix}) if err != nil { return err } marker = resp.NextMarker for _, item := range resp.Segment.BlobItems { - key := path.Base(item.Name) - index, offset, err := litestream.ParseWALSegmentPath(key) + key := strings.TrimPrefix(item.Name, prefix+"/") + index, offset, err := internal.ParseWALSegmentPath(key) if err != nil { continue } diff --git a/db.go b/db.go index dd33d7e5..682abecb 100644 --- a/db.go +++ b/db.go @@ -16,6 +16,8 @@ import ( "math/rand" "os" "path/filepath" + "regexp" + "strconv" "strings" "sync" "time" @@ -168,7 +170,7 @@ func (db *DB) ShadowWALDir(generation string) string { // Panics if generation is blank or index is negative. func (db *DB) ShadowWALPath(generation string, index int) string { assert(index >= 0, "shadow wal index cannot be negative") - return filepath.Join(db.ShadowWALDir(generation), FormatWALPath(index)) + return filepath.Join(db.ShadowWALDir(generation), FormatIndex(index)+".wal") } // CurrentShadowWALPath returns the path to the last shadow WAL in a generation. @@ -191,8 +193,8 @@ func (db *DB) CurrentShadowWALIndex(generation string) (index int, size int64, e // Find highest wal index. 
for _, fi := range fis { - if v, err := ParseWALPath(fi.Name()); err != nil { - continue // invalid wal filename + if v, err := parseWALPath(fi.Name()); err != nil { + continue // invalid filename } else if v > index { index = v } @@ -584,7 +586,7 @@ func (db *DB) cleanWAL() error { return err } for _, fi := range fis { - if idx, err := ParseWALPath(fi.Name()); err != nil || idx >= min { + if idx, err := parseWALPath(fi.Name()); err != nil || idx >= min { continue } if err := os.Remove(filepath.Join(dir, fi.Name())); err != nil { @@ -928,13 +930,13 @@ func (db *DB) syncWAL(info syncInfo) (newSize int64, err error) { // Parse index of current shadow WAL file. dir, base := filepath.Split(info.shadowWALPath) - index, err := ParseWALPath(base) + index, err := parseWALPath(base) if err != nil { return 0, fmt.Errorf("cannot parse shadow wal filename: %s", base) } // Start a new shadow WAL file with next index. - newShadowWALPath := filepath.Join(dir, FormatWALPath(index+1)) + newShadowWALPath := filepath.Join(dir, formatWALPath(index+1)) newSize, err = db.initShadowWALFile(newShadowWALPath) if err != nil { return 0, fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) @@ -1298,13 +1300,13 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { } // Parse index of current shadow WAL file. - index, err := ParseWALPath(shadowWALPath) + index, err := parseWALPath(shadowWALPath) if err != nil { return fmt.Errorf("cannot parse shadow wal filename: %s", shadowWALPath) } // Start a new shadow WAL file with next index. - newShadowWALPath := filepath.Join(filepath.Dir(shadowWALPath), FormatWALPath(index+1)) + newShadowWALPath := filepath.Join(filepath.Dir(shadowWALPath), formatWALPath(index+1)) if _, err := db.initShadowWALFile(newShadowWALPath); err != nil { return fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) } @@ -1481,6 +1483,28 @@ func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) { return h.Sum64(), pos, nil } +// parseWALPath returns the index for the WAL file. +// Returns an error if the path is not a valid WAL path. +func parseWALPath(s string) (index int, err error) { + s = filepath.Base(s) + + a := walPathRegex.FindStringSubmatch(s) + if a == nil { + return 0, fmt.Errorf("invalid wal path: %s", s) + } + + i64, _ := strconv.ParseUint(a[1], 16, 64) + return int(i64), nil +} + +// formatWALPath formats a WAL filename with a given index. +func formatWALPath(index int) string { + assert(index >= 0, "wal index must be non-negative") + return FormatIndex(index) + ".wal" +} + +var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) + // DefaultRestoreParallelism is the default parallelism when downloading WAL files. 
const DefaultRestoreParallelism = 8 diff --git a/file/replica_client.go b/file/replica_client.go index 178797af..8d0da749 100644 --- a/file/replica_client.go +++ b/file/replica_client.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" "sort" + "strings" "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/internal" @@ -84,7 +85,7 @@ func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, erro if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatSnapshotPath(index)), nil + return filepath.Join(dir, litestream.FormatIndex(index)+".snapshot.lz4"), nil } // WALDir returns the path to a generation's WAL directory @@ -102,7 +103,7 @@ func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int6 if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil + return filepath.Join(dir, litestream.FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil } // Generations returns a list of available generation names. @@ -148,7 +149,7 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { dir, err := c.SnapshotsDir(generation) if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) + return nil, err } f, err := os.Open(dir) @@ -168,7 +169,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites infos := make([]litestream.SnapshotInfo, 0, len(fis)) for _, fi := range fis { // Parse index from filename. - index, err := litestream.ParseSnapshotPath(fi.Name()) + index, err := internal.ParseSnapshotPath(filepath.Base(fi.Name())) if err != nil { continue } @@ -190,7 +191,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { filename, err := c.SnapshotPath(generation, index) if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) + return info, err } var fileInfo, dirInfo os.FileInfo @@ -243,7 +244,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { filename, err := c.SnapshotPath(generation, index) if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) + return nil, err } return os.Open(filename) } @@ -264,7 +265,7 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { dir, err := c.WALDir(generation) if err != nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) + return nil, err } f, err := os.Open(dir) @@ -281,33 +282,25 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit } // Iterate over every file and convert to metadata. - infos := make([]litestream.WALSegmentInfo, 0, len(fis)) + indexes := make([]int, 0, len(fis)) for _, fi := range fis { - // Parse index from filename. 
- index, offset, err := litestream.ParseWALSegmentPath(fi.Name()) - if err != nil { + index, err := litestream.ParseIndex(fi.Name()) + if err != nil || !fi.IsDir() { continue } - - infos = append(infos, litestream.WALSegmentInfo{ - Generation: generation, - Index: index, - Offset: offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) + indexes = append(indexes, index) } - sort.Sort(litestream.WALSegmentInfoSlice(infos)) + sort.Ints(indexes) - return litestream.NewWALSegmentInfoSliceIterator(infos), nil + return newWALSegmentIterator(dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) + return info, err } var fileInfo, dirInfo os.FileInfo @@ -361,7 +354,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) + return nil, err } return os.Open(filename) } @@ -371,7 +364,7 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po for _, pos := range a { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + return err } if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { return err @@ -379,3 +372,97 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } return nil } + +type walSegmentIterator struct { + dir string + generation string + indexes []int + + infos []litestream.WALSegmentInfo + err error +} + +func newWALSegmentIterator(dir, generation string, indexes []int) *walSegmentIterator { + return &walSegmentIterator{ + dir: dir, + generation: generation, + indexes: indexes, + } +} + +func (itr *walSegmentIterator) Close() (err error) { + return itr.err +} + +func (itr *walSegmentIterator) Next() bool { + // Exit if an error has already occurred. + if itr.err != nil { + return false + } + + for { + // Move to the next segment in cache, if available. + if len(itr.infos) > 1 { + itr.infos = itr.infos[1:] + return true + } + itr.infos = itr.infos[:0] // otherwise clear infos + + // Move to the next index unless this is the first time initializing. + if itr.infos != nil && len(itr.indexes) > 0 { + itr.indexes = itr.indexes[1:] + } + + // If no indexes remain, stop iteration. + if len(itr.indexes) == 0 { + return false + } + + // Read segments into a cache for the current index. 
+ index := itr.indexes[0] + f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index))) + if err != nil { + itr.err = err + return false + } + defer f.Close() + + fis, err := f.Readdir(-1) + if err != nil { + itr.err = err + return false + } + for _, fi := range fis { + filename := filepath.Base(fi.Name()) + if fi.IsDir() { + continue + } + + offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + if err != nil { + continue + } + + itr.infos = append(itr.infos, litestream.WALSegmentInfo{ + Generation: itr.generation, + Index: index, + Offset: offset, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + }) + } + + if len(itr.infos) > 0 { + return true + } + } +} + +func (itr *walSegmentIterator) Err() error { return itr.err } + +func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo { + if len(itr.infos) == 0 { + return litestream.WALSegmentInfo{} + } + return itr.infos[0] +} diff --git a/file/replica_client_test.go b/file/replica_client_test.go index 94d2e447..bafeefd5 100644 --- a/file/replica_client_test.go +++ b/file/replica_client_test.go @@ -118,7 +118,7 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) { t.Run("OK", func(t *testing.T) { if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { t.Fatal(err) - } else if want := "/foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want { + } else if want := "/foo/generations/0123456701234567/wal/000003e8/000003e9.wal.lz4"; got != want { t.Fatalf("WALPath()=%v, want %v", got, want) } }) diff --git a/gcs/replica_client.go b/gcs/replica_client.go index 7b2b2c67..0a45b2b0 100644 --- a/gcs/replica_client.go +++ b/gcs/replica_client.go @@ -68,7 +68,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { // Construct query to only pull generation directory names. query := &storage.Query{ Delimiter: "/", - Prefix: litestream.GenerationsPath(c.Path) + "/", + Prefix: path.Join(c.Path, "generations") + "/", } // Loop over results and only build list of generation-formatted names. @@ -96,16 +96,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } + prefix := path.Join(c.Path, "generations", generation) + "/" // Iterate over every object in generation and delete it. 
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - for it := c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"}); ; { + for it := c.bkt.Objects(ctx, &storage.Query{Prefix: prefix}); ; { attrs, err := it.Next() if err == iterator.Done { break @@ -130,24 +129,22 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.SnapshotsPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) - } - return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil + prefix := path.Join(c.Path, "generations", generation) + "/" + return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil } // WriteSnapshot writes LZ4 compressed data from rd to the object storage. func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() w := c.bkt.Object(key).NewWriter(ctx) @@ -177,12 +174,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") r, err := c.bkt.Object(key).NewReader(ctx) if isNotExists(err) { @@ -201,12 +197,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index), ".snapshot.lz4") if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) { return fmt.Errorf("cannot delete snapshot %q: %w", key, err) @@ -220,24 +215,22 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.WALPath(c.Path, generation) - if err 
!= nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) - } - return newWALSegmentIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil + prefix := path.Join(c.Path, "generations", generation, "wal") + "/" + return newWALSegmentIterator(generation, prefix, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() w := c.bkt.Object(key).NewWriter(ctx) @@ -267,12 +260,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") r, err := c.bkt.Object(key).NewReader(ctx) if isNotExists(err) { @@ -294,11 +286,11 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } for _, pos := range a { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) { return fmt.Errorf("cannot delete wal segment %q: %w", key, err) } @@ -344,7 +336,7 @@ func (itr *snapshotIterator) Next() bool { } // Parse index, otherwise skip to the next object. - index, err := litestream.ParseSnapshotPath(path.Base(attrs.Name)) + index, err := internal.ParseSnapshotPath(path.Base(attrs.Name)) if err != nil { continue } @@ -366,15 +358,17 @@ func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo { return itr.inf type walSegmentIterator struct { generation string + prefix string it *storage.ObjectIterator info litestream.WALSegmentInfo err error } -func newWALSegmentIterator(generation string, it *storage.ObjectIterator) *walSegmentIterator { +func newWALSegmentIterator(generation, prefix string, it *storage.ObjectIterator) *walSegmentIterator { return &walSegmentIterator{ generation: generation, + prefix: prefix, it: it, } } @@ -400,7 +394,7 @@ func (itr *walSegmentIterator) Next() bool { } // Parse index & offset, otherwise skip to the next object. 
- index, offset, err := litestream.ParseWALSegmentPath(path.Base(attrs.Name)) + index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(attrs.Name, itr.prefix)) if err != nil { continue } diff --git a/internal/internal.go b/internal/internal.go index b22399ca..be5027f9 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -1,8 +1,11 @@ package internal import ( + "fmt" "io" "os" + "regexp" + "strconv" "syscall" "github.com/prometheus/client_golang/prometheus" @@ -127,6 +130,33 @@ func MkdirAll(path string, fi os.FileInfo) error { return nil } +// ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. +func ParseSnapshotPath(s string) (index int, err error) { + a := snapshotPathRegex.FindStringSubmatch(s) + if a == nil { + return 0, fmt.Errorf("invalid snapshot path") + } + + i64, _ := strconv.ParseUint(a[1], 16, 64) + return int(i64), nil +} + +var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`) + +// ParseWALSegmentPath parses the index/offset from a segment filename. Used by path-based replicas. +func ParseWALSegmentPath(s string) (index int, offset int64, err error) { + a := walSegmentPathRegex.FindStringSubmatch(s) + if a == nil { + return 0, 0, fmt.Errorf("invalid wal segment path") + } + + i64, _ := strconv.ParseUint(a[1], 16, 64) + off64, _ := strconv.ParseUint(a[2], 16, 64) + return int(i64), int64(off64), nil +} + +var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\/([0-9a-f]{8})\.wal\.lz4$`) + // Shared replica metrics. var ( OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ diff --git a/internal/internal_test.go b/internal/internal_test.go new file mode 100644 index 00000000..a8eda5d5 --- /dev/null +++ b/internal/internal_test.go @@ -0,0 +1,61 @@ +package internal_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/benbjohnson/litestream/internal" +) + +func TestParseSnapshotPath(t *testing.T) { + for _, tt := range []struct { + s string + index int + err error + }{ + {"00bc614e.snapshot.lz4", 12345678, nil}, + {"xxxxxxxx.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")}, + {"00bc614.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")}, + {"00bc614e.snapshot.lz", 0, fmt.Errorf("invalid snapshot path")}, + {"00bc614e.snapshot", 0, fmt.Errorf("invalid snapshot path")}, + {"00bc614e", 0, fmt.Errorf("invalid snapshot path")}, + {"", 0, fmt.Errorf("invalid snapshot path")}, + } { + t.Run("", func(t *testing.T) { + index, err := internal.ParseSnapshotPath(tt.s) + if got, want := index, tt.index; got != want { + t.Errorf("index=%#v, want %#v", got, want) + } else if got, want := err, tt.err; !reflect.DeepEqual(got, want) { + t.Errorf("err=%#v, want %#v", got, want) + } + }) + } +} + +func TestParseWALSegmentPath(t *testing.T) { + for _, tt := range []struct { + s string + index int + offset int64 + err error + }{ + {"00bc614e/000003e8.wal.lz4", 12345678, 1000, nil}, + {"00000000/00000000.wal", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"00000000/00000000", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"00000000/", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"00000000", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"", 0, 0, fmt.Errorf("invalid wal segment path")}, + } { + t.Run("", func(t *testing.T) { + index, offset, err := internal.ParseWALSegmentPath(tt.s) + if got, want := index, tt.index; got != want { + t.Errorf("index=%#v, want %#v", got, want) + } else if got, want := offset, tt.offset; got != want { + 
t.Errorf("offset=%#v, want %#v", got, want) + } else if got, want := err, tt.err; !reflect.DeepEqual(got, want) { + t.Errorf("err=%#v, want %#v", got, want) + } + }) + } +} diff --git a/litestream.go b/litestream.go index f31985b6..bd0477c5 100644 --- a/litestream.go +++ b/litestream.go @@ -7,9 +7,7 @@ import ( "fmt" "io" "os" - "path" "path/filepath" - "regexp" "strconv" "strings" "time" @@ -384,134 +382,34 @@ func IsGenerationName(s string) bool { return true } -// GenerationsPath returns the path to a generation root directory. -func GenerationsPath(root string) string { - return path.Join(root, "generations") +// FormatIndex formats an index as an 8-character hex value. +func FormatIndex(index int) string { + return fmt.Sprintf("%08x", index) } -// GenerationPath returns the path to a generation's root directory. -func GenerationPath(root, generation string) (string, error) { - dir := GenerationsPath(root) - if generation == "" { - return "", fmt.Errorf("generation required") - } - return path.Join(dir, generation), nil -} - -// SnapshotsPath returns the path to a generation's snapshot directory. -func SnapshotsPath(root, generation string) (string, error) { - dir, err := GenerationPath(root, generation) - if err != nil { - return "", err - } - return path.Join(dir, "snapshots"), nil -} - -// SnapshotPath returns the path to an uncompressed snapshot file. -func SnapshotPath(root, generation string, index int) (string, error) { - dir, err := SnapshotsPath(root, generation) +// ParseIndex parses a hex-formatted index into an integer. +func ParseIndex(s string) (int, error) { + v, err := strconv.ParseUint(s, 16, 32) if err != nil { - return "", err + return -1, fmt.Errorf("cannot parse index: %q", s) } - return path.Join(dir, FormatSnapshotPath(index)), nil + return int(v), nil } -// WALPath returns the path to a generation's WAL directory -func WALPath(root, generation string) (string, error) { - dir, err := GenerationPath(root, generation) - if err != nil { - return "", err - } - return path.Join(dir, "wal"), nil +// FormatOffset formats an offset as an 8-character hex value. +func FormatOffset(offset int64) string { + return fmt.Sprintf("%08x", offset) } -// WALSegmentPath returns the path to a WAL segment file. -func WALSegmentPath(root, generation string, index int, offset int64) (string, error) { - dir, err := WALPath(root, generation) +// ParseOffset parses a hex-formatted offset into an integer. +func ParseOffset(s string) (int64, error) { + v, err := strconv.ParseInt(s, 16, 32) if err != nil { - return "", err + return -1, fmt.Errorf("cannot parse index: %q", s) } - return path.Join(dir, FormatWALSegmentPath(index, offset)), nil + return v, nil } -// IsSnapshotPath returns true if s is a path to a snapshot file. -func IsSnapshotPath(s string) bool { - return snapshotPathRegex.MatchString(s) -} - -// ParseSnapshotPath returns the index for the snapshot. -// Returns an error if the path is not a valid snapshot path. -func ParseSnapshotPath(s string) (index int, err error) { - s = filepath.Base(s) - - a := snapshotPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, fmt.Errorf("invalid snapshot path: %s", s) - } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil -} - -// FormatSnapshotPath formats a snapshot filename with a given index. 
-func FormatSnapshotPath(index int) string { - assert(index >= 0, "snapshot index must be non-negative") - return fmt.Sprintf("%08x%s", index, SnapshotExt) -} - -var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`) - -// IsWALPath returns true if s is a path to a WAL file. -func IsWALPath(s string) bool { - return walPathRegex.MatchString(s) -} - -// ParseWALPath returns the index for the WAL file. -// Returns an error if the path is not a valid WAL path. -func ParseWALPath(s string) (index int, err error) { - s = filepath.Base(s) - - a := walPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, fmt.Errorf("invalid wal path: %s", s) - } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil -} - -// FormatWALPath formats a WAL filename with a given index. -func FormatWALPath(index int) string { - assert(index >= 0, "wal index must be non-negative") - return fmt.Sprintf("%08x%s", index, WALExt) -} - -var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) - -// ParseWALSegmentPath returns the index & offset for the WAL segment file. -// Returns an error if the path is not a valid wal segment path. -func ParseWALSegmentPath(s string) (index int, offset int64, err error) { - s = filepath.Base(s) - - a := walSegmentPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, 0, fmt.Errorf("invalid wal segment path: %s", s) - } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - off64, _ := strconv.ParseUint(a[2], 16, 64) - return int(i64), int64(off64), nil -} - -// FormatWALSegmentPath formats a WAL segment filename with a given index & offset. -func FormatWALSegmentPath(index int, offset int64) string { - assert(index >= 0, "wal index must be non-negative") - assert(offset >= 0, "wal offset must be non-negative") - return fmt.Sprintf("%08x_%08x%s", index, offset, WALSegmentExt) -} - -var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))\.wal\.lz4$`) - // isHexChar returns true if ch is a lowercase hex character. 
func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') diff --git a/litestream_test.go b/litestream_test.go index 0f1bb859..a03a7489 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -40,94 +40,6 @@ func TestChecksum(t *testing.T) { }) } -func TestGenerationsPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, want := litestream.GenerationsPath("foo"), "foo/generations"; got != want { - t.Fatalf("GenerationsPath()=%v, want %v", got, want) - } - }) - t.Run("NoPath", func(t *testing.T) { - if got, want := litestream.GenerationsPath(""), "generations"; got != want { - t.Fatalf("GenerationsPath()=%v, want %v", got, want) - } - }) -} - -func TestGenerationPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.GenerationPath("foo", "0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567"; got != want { - t.Fatalf("GenerationPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.GenerationPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("expected error: %v", err) - } - }) -} - -func TestSnapshotsPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.SnapshotsPath("foo", "0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/snapshots"; got != want { - t.Fatalf("SnapshotsPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.SnapshotsPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestSnapshotPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.SnapshotPath("foo", "0123456701234567", 1000); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want { - t.Fatalf("SnapshotPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.SnapshotPath("foo", "", 1000); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestWALPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.WALPath("foo", "0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/wal"; got != want { - t.Fatalf("WALPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.WALPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestWALSegmentPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.WALSegmentPath("foo", "0123456701234567", 1000, 1001); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want { - t.Fatalf("WALPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.WALSegmentPath("foo", "", 1000, 0); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - func MustDecodeHexString(s string) []byte { b, err := hex.DecodeString(s) if err != nil { diff --git a/replica_client_test.go b/replica_client_test.go index 69f9746a..ec2d8411 100644 --- a/replica_client_test.go +++ 
b/replica_client_test.go @@ -177,7 +177,7 @@ func TestReplicaClient_Snapshots(t *testing.T) { if err == nil { err = itr.Close() } - if err == nil || err.Error() != `cannot determine snapshots path: generation required` { + if err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -204,7 +204,7 @@ func TestReplicaClient_WriteSnapshot(t *testing.T) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() - if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` { + if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -242,13 +242,13 @@ func TestReplicaClient_SnapshotReader(t *testing.T) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() - if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` { + if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) } -func TestReplicaClient_WALs(t *testing.T) { +func TestReplicaClient_WALSegments(t *testing.T) { RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() @@ -362,7 +362,7 @@ func TestReplicaClient_WALs(t *testing.T) { if err == nil { err = itr.Close() } - if err == nil || err.Error() != `cannot determine wal path: generation required` { + if err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -389,13 +389,13 @@ func TestReplicaClient_WriteWALSegment(t *testing.T) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` { + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) } -func TestReplicaClient_WALReader(t *testing.T) { +func TestReplicaClient_WALSegmentReader(t *testing.T) { RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() @@ -451,7 +451,7 @@ func TestReplicaClient_DeleteWALSegments(t *testing.T) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { t.Parallel() - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` { + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) diff --git a/s3/replica_client.go b/s3/replica_client.go index b68628a1..a739a5f5 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -10,6 +10,7 @@ import ( "os" "path" "regexp" + "strings" "sync" "time" @@ -154,7 +155,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { var generations []string if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ Bucket: aws.String(c.Bucket), - Prefix: 
aws.String(litestream.GenerationsPath(c.Path) + "/"), + Prefix: aws.String(path.Join(c.Path, "generations") + "/"), Delimiter: aws.String("/"), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() @@ -178,18 +179,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err - } - - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) + } else if generation == "" { + return fmt.Errorf("generation required") } // Collect all files for the generation. var objIDs []*s3.ObjectIdentifier if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ Bucket: aws.String(c.Bucket), - Prefix: aws.String(dir), + Prefix: aws.String(path.Join(c.Path, "generations", generation)), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() @@ -236,12 +234,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -270,12 +267,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(c.Bucket), @@ -296,12 +292,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(c.Bucket), @@ -326,12 +321,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { 
return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -360,12 +354,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(c.Bucket), @@ -397,10 +390,10 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po // Generate a batch of object IDs for deleting the WAL segments. for i, pos := range a[:n] { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") objIDs[i] = &s3.ObjectIdentifier{Key: &key} } @@ -498,11 +491,12 @@ func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation func (itr *snapshotIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine snapshots path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + dir := path.Join(itr.client.Path, "generations", itr.generation, "snapshots") + return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ Bucket: aws.String(itr.client.Bucket), Prefix: aws.String(dir + "/"), @@ -511,8 +505,7 @@ func (itr *snapshotIterator) fetch() error { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() for _, obj := range page.Contents { - key := path.Base(*obj.Key) - index, err := litestream.ParseSnapshotPath(key) + index, err := internal.ParseSnapshotPath(path.Base(*obj.Key)) if err != nil { continue } @@ -601,21 +594,20 @@ func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generatio func (itr *walSegmentIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.WALPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine wal path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal") + "/" + return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ - Bucket: aws.String(itr.client.Bucket), - Prefix: aws.String(dir + "/"), - Delimiter: aws.String("/"), + Bucket: 
aws.String(itr.client.Bucket), + Prefix: aws.String(prefix), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() for _, obj := range page.Contents { - key := path.Base(*obj.Key) - index, offset, err := litestream.ParseWALSegmentPath(key) + index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(*obj.Key, prefix)) if err != nil { continue } diff --git a/sftp/replica_client.go b/sftp/replica_client.go index 6b082b4a..30d8fa87 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -9,6 +9,7 @@ import ( "os" "path" "sort" + "strings" "sync" "time" @@ -121,7 +122,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error) return nil, err } - fis, err := sftpClient.ReadDir(litestream.GenerationsPath(c.Path)) + fis, err := sftpClient.ReadDir(path.Join(c.Path, "generations")) if os.IsNotExist(err) { return nil, nil } else if err != nil { @@ -153,12 +154,11 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) sftpClient, err := c.Init(ctx) if err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } + dir := path.Join(c.Path, "generations", generation) var dirs []string walker := sftpClient.Walk(dir) @@ -198,12 +198,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.SnapshotsPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) - } + dir := path.Join(c.Path, "generations", generation, "snapshots") fis, err := sftpClient.ReadDir(dir) if os.IsNotExist(err) { @@ -216,7 +215,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit infos := make([]litestream.SnapshotInfo, 0, len(fis)) for _, fi := range fis { // Parse index from filename. 
- index, err := litestream.ParseSnapshotPath(path.Base(fi.Name())) + index, err := internal.ParseSnapshotPath(path.Base(fi.Name())) if err != nil { continue } @@ -241,12 +240,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in sftpClient, err := c.Init(ctx) if err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { @@ -286,12 +284,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") f, err := sftpClient.Open(filename) if err != nil { @@ -310,12 +307,11 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i sftpClient, err := c.Init(ctx) if err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete snapshot %q: %w", filename, err) @@ -332,12 +328,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.WALPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) - } + dir := path.Join(c.Path, "generations", generation, "wal") fis, err := sftpClient.ReadDir(dir) if os.IsNotExist(err) { @@ -347,25 +342,18 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l } // Iterate over every file and convert to metadata. - infos := make([]litestream.WALSegmentInfo, 0, len(fis)) + indexes := make([]int, 0, len(fis)) for _, fi := range fis { - index, offset, err := litestream.ParseWALSegmentPath(path.Base(fi.Name())) - if err != nil { + index, err := litestream.ParseIndex(fi.Name()) + if err != nil || !fi.IsDir() { continue } - - infos = append(infos, litestream.WALSegmentInfo{ - Generation: generation, - Index: index, - Offset: offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) + indexes = append(indexes, index) } - sort.Sort(litestream.WALSegmentInfoSlice(infos)) + sort.Ints(indexes) - return litestream.NewWALSegmentInfoSliceIterator(infos), nil + return newWALSegmentIterator(ctx, c, dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. 
@@ -375,12 +363,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, sftpClient, err := c.Init(ctx) if err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { @@ -420,12 +407,11 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") f, err := sftpClient.Open(filename) if err != nil { @@ -447,11 +433,12 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } for _, pos := range a { - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") + if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete wal segment %q: %w", filename, err) } @@ -470,7 +457,7 @@ func (c *ReplicaClient) Cleanup(ctx context.Context) (err error) { return err } - if err := sftpClient.RemoveDirectory(litestream.GenerationsPath(c.Path)); err != nil && !os.IsNotExist(err) { + if err := sftpClient.RemoveDirectory(path.Join(c.Path, "generations")); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete generations path: %w", err) } else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete path: %w", err) @@ -493,3 +480,101 @@ func (c *ReplicaClient) resetOnConnError(err error) { c.sshClient = nil } } + +type walSegmentIterator struct { + ctx context.Context + client *ReplicaClient + dir string + generation string + indexes []int + + infos []litestream.WALSegmentInfo + err error +} + +func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, dir, generation string, indexes []int) *walSegmentIterator { + return &walSegmentIterator{ + ctx: ctx, + client: client, + dir: dir, + generation: generation, + indexes: indexes, + } +} + +func (itr *walSegmentIterator) Close() (err error) { + return itr.err +} + +func (itr *walSegmentIterator) Next() bool { + sftpClient, err := itr.client.Init(itr.ctx) + if err != nil { + itr.err = err + return false + } + + // Exit if an error has already occurred. + if itr.err != nil { + return false + } + + for { + // Move to the next segment in cache, if available. 
+ if len(itr.infos) > 1 { + itr.infos = itr.infos[1:] + return true + } + itr.infos = itr.infos[:0] // otherwise clear infos + + // Move to the next index unless this is the first time initializing. + if itr.infos != nil && len(itr.indexes) > 0 { + itr.indexes = itr.indexes[1:] + } + + // If no indexes remain, stop iteration. + if len(itr.indexes) == 0 { + return false + } + + // Read segments into a cache for the current index. + index := itr.indexes[0] + fis, err := sftpClient.ReadDir(path.Join(itr.dir, litestream.FormatIndex(index))) + if err != nil { + itr.err = err + return false + } + + for _, fi := range fis { + filename := path.Base(fi.Name()) + if fi.IsDir() { + continue + } + + offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + if err != nil { + continue + } + + itr.infos = append(itr.infos, litestream.WALSegmentInfo{ + Generation: itr.generation, + Index: index, + Offset: offset, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + }) + } + + if len(itr.infos) > 0 { + return true + } + } +} + +func (itr *walSegmentIterator) Err() error { return itr.err } + +func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo { + if len(itr.infos) == 0 { + return litestream.WALSegmentInfo{} + } + return itr.infos[0] +} From 77274abf81fc878fff4445c6306b9da3354b6960 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Thu, 22 Jul 2021 16:03:29 -0600 Subject: [PATCH 02/95] Refactor shadow WAL to use segments --- cmd/litestream/main.go | 6 +- cmd/litestream/main_notwindows.go | 7 +- cmd/litestream/main_test.go | 6 + cmd/litestream/main_windows.go | 7 +- cmd/litestream/replicate.go | 14 - cmd/litestream/replicate_test.go | 135 ++++ db.go | 989 ++++++++++++++++++------------ db_test.go | 303 +++------ file/replica_client.go | 13 +- file/replica_client_test.go | 88 --- internal/internal.go | 33 + litestream.go | 28 + replica.go | 201 +++--- replica_test.go | 23 +- 14 files changed, 1021 insertions(+), 832 deletions(-) create mode 100644 cmd/litestream/replicate_test.go diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index d186f61f..783f73e6 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -9,6 +9,7 @@ import ( "log" "net/url" "os" + "os/signal" "os/user" "path" "path/filepath" @@ -86,7 +87,8 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { // Setup signal handler. ctx, cancel := context.WithCancel(ctx) - signalCh := signalChan() + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, notifySignals...) if err := c.Run(ctx); err != nil { return err @@ -94,6 +96,8 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { // Wait for signal to stop program. 
select { + case <-ctx.Done(): + fmt.Println("context done, litestream shutting down") case err = <-c.execCh: cancel() fmt.Println("subprocess exited, litestream shutting down") diff --git a/cmd/litestream/main_notwindows.go b/cmd/litestream/main_notwindows.go index aaf87a10..6d4dcef9 100644 --- a/cmd/litestream/main_notwindows.go +++ b/cmd/litestream/main_notwindows.go @@ -5,7 +5,6 @@ package main import ( "context" "os" - "os/signal" "syscall" ) @@ -19,8 +18,4 @@ func runWindowsService(ctx context.Context) error { panic("cannot run windows service as unix process") } -func signalChan() <-chan os.Signal { - ch := make(chan os.Signal, 2) - signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) - return ch -} +var notifySignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index d99c52df..75131e4b 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -2,16 +2,22 @@ package main_test import ( "io/ioutil" + "log" "os" "path/filepath" "testing" + "github.com/benbjohnson/litestream" main "github.com/benbjohnson/litestream/cmd/litestream" "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" ) +func init() { + litestream.LogFlags = log.Lmsgprefix | log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC | log.Lshortfile +} + func TestReadConfigFile(t *testing.T) { // Ensure global AWS settings are propagated down to replica configurations. t.Run("PropagateGlobalSettings", func(t *testing.T) { diff --git a/cmd/litestream/main_windows.go b/cmd/litestream/main_windows.go index a762d322..512ab263 100644 --- a/cmd/litestream/main_windows.go +++ b/cmd/litestream/main_windows.go @@ -7,7 +7,6 @@ import ( "io" "log" "os" - "os/signal" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" @@ -105,8 +104,4 @@ func (w *eventlogWriter) Write(p []byte) (n int, err error) { return 0, elog.Info(1, string(p)) } -func signalChan() <-chan os.Signal { - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - return ch -} +var notifySignals = []os.Signal{os.Interrupt} diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 7c0403bf..fdaebd2f 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -42,7 +42,6 @@ func NewReplicateCommand() *ReplicateCommand { func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) execFlag := fs.String("exec", "", "execute subcommand") - tracePath := fs.String("trace", "", "trace path") configPath, noExpandEnv := registerConfigFlag(fs) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -80,16 +79,6 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e c.Config.Exec = *execFlag } - // Enable trace logging. - if *tracePath != "" { - f, err := os.Create(*tracePath) - if err != nil { - return err - } - defer f.Close() - litestream.Tracef = log.New(f, "", log.LstdFlags|log.Lmicroseconds|log.LUTC|log.Lshortfile).Printf - } - return nil } @@ -215,8 +204,5 @@ Arguments: -no-expand-env Disables environment variable expansion in configuration file. - -trace PATH - Write verbose trace logging to PATH. 
- `[1:], DefaultConfigPath()) } diff --git a/cmd/litestream/replicate_test.go b/cmd/litestream/replicate_test.go new file mode 100644 index 00000000..47085808 --- /dev/null +++ b/cmd/litestream/replicate_test.go @@ -0,0 +1,135 @@ +package main_test + +import ( + "context" + "database/sql" + "errors" + "fmt" + "hash/crc64" + "io" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + main "github.com/benbjohnson/litestream/cmd/litestream" + "golang.org/x/sync/errgroup" +) + +func TestReplicateCommand(t *testing.T) { + if testing.Short() { + t.Skip("long running test, skipping") + } else if runtime.GOOS != "linux" { + t.Skip("must run system tests on Linux, skipping") + } + + const writeTime = 10 * time.Second + + dir := t.TempDir() + configPath := filepath.Join(dir, "litestream.yml") + dbPath := filepath.Join(dir, "db") + restorePath := filepath.Join(dir, "restored") + replicaPath := filepath.Join(dir, "replica") + + if err := os.WriteFile(configPath, []byte(` +dbs: + - path: `+dbPath+` + replicas: + - path: `+replicaPath+` +`), 0666); err != nil { + t.Fatal(err) + } + + // Generate data into SQLite database from separate goroutine. + g, ctx := errgroup.WithContext(context.Background()) + mainctx, cancel := context.WithCancel(ctx) + g.Go(func() error { + defer cancel() + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + return err + } + defer db.Close() + + if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + return fmt.Errorf("cannot enable wal: %w", err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + return fmt.Errorf("cannot enable wal: %w", err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + return fmt.Errorf("cannot create table: %w", err) + } + + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + timer := time.NewTimer(writeTime) + defer timer.Stop() + + for i := 0; ; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + case <-ticker.C: + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?);`, i); err != nil { + return fmt.Errorf("cannot insert: i=%d err=%w", i, err) + } + } + } + }) + + // Replicate database unless the context is canceled. + g.Go(func() error { + return main.NewMain().Run(mainctx, []string{"replicate", "-config", configPath}) + }) + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + + // Checkpoint database. + mustCheckpoint(t, dbPath) + chksum0 := mustChecksum(t, dbPath) + + // Restore to another path. + if err := main.NewMain().Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) { + t.Fatal(err) + } + + // Verify contents match. 
+ if chksum1 := mustChecksum(t, restorePath); chksum0 != chksum1 { + t.Fatal("restore mismatch") + } +} + +func mustCheckpoint(tb testing.TB, path string) { + tb.Helper() + + db, err := sql.Open("sqlite3", path) + if err != nil { + tb.Fatal(err) + } + defer db.Close() + + if _, err := db.Exec(`PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + tb.Fatal(err) + } +} + +func mustChecksum(tb testing.TB, path string) uint64 { + tb.Helper() + + f, err := os.Open(path) + if err != nil { + tb.Fatal(err) + } + defer f.Close() + + h := crc64.New(crc64.MakeTable(crc64.ISO)) + if _, err := io.Copy(h, f); err != nil { + tb.Fatal(err) + } + return h.Sum64() +} diff --git a/db.go b/db.go index 682abecb..2a421ad6 100644 --- a/db.go +++ b/db.go @@ -17,12 +17,14 @@ import ( "os" "path/filepath" "regexp" + "sort" "strconv" "strings" "sync" "time" "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) @@ -49,9 +51,17 @@ type DB struct { db *sql.DB // target database f *os.File // long-running db file descriptor rtx *sql.Tx // long running read transaction + pos Pos // cached position pageSize int // page size, in bytes notify chan struct{} // closes on WAL change + // Cached salt & checksum from current shadow header. + hdr []byte + frame []byte + salt0, salt1 uint32 + chksum0, chksum1 uint32 + byteOrder binary.ByteOrder + fileInfo os.FileInfo // db info cached during init dirInfo os.FileInfo // parent dir info cached during init @@ -96,6 +106,8 @@ type DB struct { // List of replicas for the database. // Must be set before calling Open(). Replicas []*Replica + + Logger *log.Logger } // NewDB returns a new instance of DB for a given path. @@ -108,6 +120,8 @@ func NewDB(path string) *DB { MaxCheckpointPageN: DefaultMaxCheckpointPageN, CheckpointInterval: DefaultCheckpointInterval, MonitorInterval: DefaultMonitorInterval, + + Logger: log.New(LogWriter, fmt.Sprintf("%s: ", path), LogFlags), } db.dbSizeGauge = dbSizeGaugeVec.WithLabelValues(db.path) @@ -145,7 +159,7 @@ func (db *DB) WALPath() string { // MetaPath returns the path to the database metadata. func (db *DB) MetaPath() string { dir, file := filepath.Split(db.path) - return filepath.Join(dir, "."+file+MetaDirSuffix) + return filepath.Join(dir, file+MetaDirSuffix) } // GenerationNamePath returns the path of the name of the current generation. @@ -166,44 +180,6 @@ func (db *DB) ShadowWALDir(generation string) string { return filepath.Join(db.GenerationPath(generation), "wal") } -// ShadowWALPath returns the path of a single shadow WAL file. -// Panics if generation is blank or index is negative. -func (db *DB) ShadowWALPath(generation string, index int) string { - assert(index >= 0, "shadow wal index cannot be negative") - return filepath.Join(db.ShadowWALDir(generation), FormatIndex(index)+".wal") -} - -// CurrentShadowWALPath returns the path to the last shadow WAL in a generation. -func (db *DB) CurrentShadowWALPath(generation string) (string, error) { - index, _, err := db.CurrentShadowWALIndex(generation) - if err != nil { - return "", err - } - return db.ShadowWALPath(generation, index), nil -} - -// CurrentShadowWALIndex returns the current WAL index & total size. 
-func (db *DB) CurrentShadowWALIndex(generation string) (index int, size int64, err error) { - fis, err := ioutil.ReadDir(filepath.Join(db.GenerationPath(generation), "wal")) - if os.IsNotExist(err) { - return 0, 0, nil // no wal files written for generation - } else if err != nil { - return 0, 0, err - } - - // Find highest wal index. - for _, fi := range fis { - if v, err := parseWALPath(fi.Name()); err != nil { - continue // invalid filename - } else if v > index { - index = v - } - - size += fi.Size() - } - return index, size, nil -} - // FileInfo returns the cached file stats for the database file when it was initialized. func (db *DB) FileInfo() os.FileInfo { return db.fileInfo @@ -224,28 +200,212 @@ func (db *DB) Replica(name string) *Replica { return nil } -// Pos returns the current position of the database. -func (db *DB) Pos() (Pos, error) { +// Pos returns the cached position of the database. +// Returns a zero position if no position has been calculated or if there is no generation. +func (db *DB) Pos() Pos { + db.mu.RLock() + defer db.mu.RUnlock() + return db.pos +} + +// reset clears all cached data. +func (db *DB) reset() { + db.pos = Pos{} + db.hdr, db.frame = nil, nil + db.salt0, db.salt1 = 0, 0 + db.chksum0, db.chksum1 = 0, 0 + db.byteOrder = nil +} + +// invalidate refreshes cached position, salt, & checksum from on-disk data. +func (db *DB) invalidate(ctx context.Context) (err error) { + // Clear cached data before starting. + db.reset() + + // If any error occurs, ensure all cached data is cleared. + defer func() { + if err != nil { + db.reset() + } + }() + + // Determine the last position of the current generation. + if err := db.invalidatePos(ctx); err != nil { + return fmt.Errorf("cannot determine pos: %w", err) + } else if db.pos.IsZero() { + db.Logger.Printf("init: no wal files available, clearing generation") + if err := db.clearGeneration(ctx); err != nil { + return fmt.Errorf("clear generation: %w", err) + } + return nil // no position, exit + } + + // Determine salt & last checksum. + if err := db.invalidateChecksum(ctx); err != nil { + return fmt.Errorf("cannot determine last salt/checksum: %w", err) + } + return nil +} + +func (db *DB) invalidatePos(ctx context.Context) error { + // Determine generation based off "generation" file in meta directory. generation, err := db.CurrentGeneration() if err != nil { - return Pos{}, err + return err } else if generation == "" { - return Pos{}, nil + return nil } - index, _, err := db.CurrentShadowWALIndex(generation) + // Iterate over all segments to find the last one. + itr, err := db.WALSegments(context.Background(), generation) if err != nil { - return Pos{}, err + return err } + defer itr.Close() - fi, err := os.Stat(db.ShadowWALPath(generation, index)) - if os.IsNotExist(err) { - return Pos{Generation: generation, Index: index}, nil - } else if err != nil { - return Pos{}, err + var pos Pos + for itr.Next() { + info := itr.WALSegment() + pos = info.Pos() + } + if err := itr.Close(); err != nil { + return err + } + + // Exit if no WAL segments exist. + if pos.IsZero() { + return nil } - return Pos{Generation: generation, Index: index, Offset: frameAlign(fi.Size(), db.pageSize)}, nil + // Read size of last segment to determine ending position. 
+ rd, err := db.WALSegmentReader(ctx, pos) + if err != nil { + return fmt.Errorf("cannot read last wal segment: %w", err) + } + defer rd.Close() + + n, err := io.Copy(ioutil.Discard, lz4.NewReader(rd)) + if err != nil { + return err + } + pos.Offset += n + + // Save position to cache. + db.pos = pos + + return nil +} + +func (db *DB) invalidateChecksum(ctx context.Context) error { + assert(!db.pos.IsZero(), "position required to invalidate checksum") + + // Read entire WAL from combined segments. + walReader, err := db.WALReader(ctx, db.pos.Generation, db.pos.Index) + if err != nil { + return fmt.Errorf("cannot read last wal: %w", err) + } + defer walReader.Close() + + // Ensure we don't read past our position. + r := &io.LimitedReader{R: walReader, N: db.pos.Offset} + + // Read header. + hdr := make([]byte, WALHeaderSize) + if _, err := io.ReadFull(r, hdr); err != nil { + return fmt.Errorf("read shadow wal header: %w", err) + } + + // Read byte order. + byteOrder, err := headerByteOrder(hdr) + if err != nil { + return err + } + + // Save salt & checksum to cache, although checksum may be overridden later. + db.salt0 = binary.BigEndian.Uint32(hdr[16:]) + db.salt1 = binary.BigEndian.Uint32(hdr[20:]) + db.chksum0 = binary.BigEndian.Uint32(hdr[24:]) + db.chksum1 = binary.BigEndian.Uint32(hdr[28:]) + db.byteOrder = byteOrder + + // Iterate over each page in the WAL and save the checksum. + frame := make([]byte, db.pageSize+WALFrameHeaderSize) + var hasFrame bool + for { + // Read next page from WAL file. + if _, err := io.ReadFull(r, frame); err == io.EOF { + break // end of WAL file + } else if err != nil { + return fmt.Errorf("read wal: %w", err) + } + + // Save frame checksum to cache. + hasFrame = true + db.chksum0 = binary.BigEndian.Uint32(frame[16:]) + db.chksum1 = binary.BigEndian.Uint32(frame[20:]) + } + + // Save last frame to cache. + if hasFrame { + db.frame = frame + } else { + db.frame = nil + } + + return nil +} + +// WALReader returns the entire uncompressed WAL file for a given index. +func (db *DB) WALReader(ctx context.Context, generation string, index int) (_ io.ReadCloser, err error) { + // If any error occurs, we need to clean up all open handles. + var rcs []io.ReadCloser + defer func() { + if err != nil { + for _, rc := range rcs { + rc.Close() + } + } + }() + + offsets, err := db.walSegmentOffsetsByIndex(generation, index) + if err != nil { + return nil, fmt.Errorf("wal segment offsets: %w", err) + } + + for _, offset := range offsets { + f, err := os.Open(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index), FormatOffset(offset)+".wal.lz4")) + if err != nil { + return nil, err + } + rcs = append(rcs, internal.NewReadCloser(lz4.NewReader(f), f)) + } + + return internal.NewMultiReadCloser(rcs), nil +} + +func (db *DB) walSegmentOffsetsByIndex(generation string, index int) ([]int64, error) { + // Read files from index directory. + ents, err := os.ReadDir(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index))) + if err != nil { + return nil, err + } + + var offsets []int64 + for _, ent := range ents { + if !strings.HasSuffix(ent.Name(), ".wal.lz4") { + continue + } + offset, err := ParseOffset(strings.TrimSuffix(filepath.Base(ent.Name()), ".wal.lz4")) + if err != nil { + continue + } + offsets = append(offsets, offset) + } + + // Sort before returning. + sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] }) + + return offsets, nil } // Notify returns a channel that closes when the shadow WAL changes. 
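Because shadow WAL segments are now LZ4-compressed, the ending offset of the WAL can no longer be read from a file size; invalidatePos instead decompresses the newest segment and adds its uncompressed length to that segment's starting offset. A minimal sketch of that arithmetic, written as a hypothetical helper (not part of the patch) inside package litestream:

// endOfShadowWAL illustrates the position arithmetic used by invalidatePos:
// pos names the newest segment, whose filename encodes its starting offset,
// and the uncompressed segment length is added to find the end of the WAL.
func (db *DB) endOfShadowWAL(ctx context.Context, pos Pos) (Pos, error) {
	rd, err := db.WALSegmentReader(ctx, pos)
	if err != nil {
		return pos, err
	}
	defer rd.Close()

	n, err := io.Copy(ioutil.Discard, lz4.NewReader(rd))
	if err != nil {
		return pos, err
	}
	pos.Offset += n // segment start + uncompressed length = end of shadow WAL
	return pos, nil
}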
@@ -431,13 +591,13 @@ func (db *DB) init() (err error) { // Create a table to force writes to the WAL when empty. // There should only ever be one row with id=1. - if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil { + if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil { return fmt.Errorf("create _litestream_seq table: %w", err) } // Create a lock table to force write locks during sync. // The sync write transaction always rolls back so no data should be in this table. - if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil { + if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil { return fmt.Errorf("create _litestream_lock table: %w", err) } @@ -448,7 +608,7 @@ func (db *DB) init() (err error) { } // Read page size. - if err := db.db.QueryRow(`PRAGMA page_size;`).Scan(&db.pageSize); err != nil { + if err := db.db.QueryRowContext(db.ctx, `PRAGMA page_size;`).Scan(&db.pageSize); err != nil { return fmt.Errorf("read page size: %w", err) } else if db.pageSize <= 0 { return fmt.Errorf("invalid db page size: %d", db.pageSize) @@ -459,16 +619,21 @@ func (db *DB) init() (err error) { return err } + // Determine current position, if available. + if err := db.invalidate(db.ctx); err != nil { + return fmt.Errorf("invalidate: %w", err) + } + // If we have an existing shadow WAL, ensure the headers match. if err := db.verifyHeadersMatch(); err != nil { - log.Printf("%s: init: cannot determine last wal position, clearing generation; %s", db.path, err) - if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove generation name: %w", err) + db.Logger.Printf("init: cannot determine last wal position, clearing generation; %s", err) + if err := db.clearGeneration(db.ctx); err != nil { + return fmt.Errorf("clear generation: %w", err) } } // Clean up previous generations. - if err := db.clean(); err != nil { + if err := db.clean(db.ctx); err != nil { return fmt.Errorf("clean: %w", err) } @@ -480,52 +645,46 @@ func (db *DB) init() (err error) { return nil } -// verifyHeadersMatch returns true if the primary WAL and last shadow WAL header match. -func (db *DB) verifyHeadersMatch() error { - // Determine current generation. - generation, err := db.CurrentGeneration() - if err != nil { +func (db *DB) clearGeneration(ctx context.Context) error { + if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { return err - } else if generation == "" { - return nil } + return nil +} - // Find current generation & latest shadow WAL. - shadowWALPath, err := db.CurrentShadowWALPath(generation) - if err != nil { - return fmt.Errorf("cannot determine current shadow wal path: %w", err) +// verifyHeadersMatch returns true if the primary WAL and last shadow WAL header match. +func (db *DB) verifyHeadersMatch() error { + // Skip verification if we have no current position. + if db.pos.IsZero() { + return nil } - hdr0, err := readWALHeader(db.WALPath()) + // Read header from the real WAL file. 
+ hdr, err := readWALHeader(db.WALPath()) if os.IsNotExist(err) { return fmt.Errorf("no primary wal: %w", err) } else if err != nil { return fmt.Errorf("primary wal header: %w", err) } - hdr1, err := readWALHeader(shadowWALPath) - if os.IsNotExist(err) { - return fmt.Errorf("no shadow wal") - } else if err != nil { - return fmt.Errorf("shadow wal header: %w", err) - } - - if !bytes.Equal(hdr0, hdr1) { - return fmt.Errorf("wal header mismatch %x <> %x on %s", hdr0, hdr1, shadowWALPath) + // Compare real WAL header with shadow WAL header. + // If there is a mismatch then the real WAL has been restarted outside Litestream. + if !bytes.Equal(hdr, db.hdr) { + return fmt.Errorf("wal header mismatch at %s", db.pos.Truncate()) } return nil } // clean removes old generations & WAL files. -func (db *DB) clean() error { - if err := db.cleanGenerations(); err != nil { +func (db *DB) clean(ctx context.Context) error { + if err := db.cleanGenerations(ctx); err != nil { return err } - return db.cleanWAL() + return db.cleanWAL(ctx) } // cleanGenerations removes old generations. -func (db *DB) cleanGenerations() error { +func (db *DB) cleanGenerations(ctx context.Context) error { generation, err := db.CurrentGeneration() if err != nil { return err @@ -553,46 +712,50 @@ func (db *DB) cleanGenerations() error { } // cleanWAL removes WAL files that have been replicated. -func (db *DB) cleanWAL() error { +func (db *DB) cleanWAL(ctx context.Context) error { generation, err := db.CurrentGeneration() if err != nil { - return err + return fmt.Errorf("current generation: %w", err) } // Determine lowest index that's been replicated to all replicas. - min := -1 + minIndex := -1 for _, r := range db.Replicas { - pos := r.Pos() + pos := r.Pos().Truncate() if pos.Generation != generation { - pos = Pos{} // different generation, reset index to zero - } - if min == -1 || pos.Index < min { - min = pos.Index + continue // different generation, skip + } else if minIndex == -1 || pos.Index < minIndex { + minIndex = pos.Index } } - // Skip if our lowest index is too small. - if min <= 0 { + // Skip if our lowest position is too small. + if minIndex <= 0 { return nil } - min-- // Keep an extra WAL file. - // Remove all WAL files for the generation before the lowest index. + // Delete all WAL index directories below the minimum position. dir := db.ShadowWALDir(generation) - fis, err := ioutil.ReadDir(dir) - if os.IsNotExist(err) { - return nil - } else if err != nil { + ents, err := os.ReadDir(dir) + if err != nil { return err } - for _, fi := range fis { - if idx, err := parseWALPath(fi.Name()); err != nil || idx >= min { + + for _, ent := range ents { + index, err := ParseIndex(ent.Name()) + if err != nil { continue + } else if index >= minIndex { + continue // not below min, skip } - if err := os.Remove(filepath.Join(dir, fi.Name())); err != nil { + + if err := os.RemoveAll(filepath.Join(dir, FormatIndex(index))); err != nil { return err } + + db.Logger.Printf("remove shadow index: %s/%08x", generation, index) } + return nil } @@ -609,7 +772,7 @@ func (db *DB) acquireReadLock() error { } // Execute read query to obtain read lock. - if _, err := tx.ExecContext(db.ctx, `SELECT COUNT(1) FROM _litestream_seq;`); err != nil { + if _, err := tx.Exec(`SELECT COUNT(1) FROM _litestream_seq;`); err != nil { _ = tx.Rollback() return err } @@ -635,15 +798,13 @@ func (db *DB) releaseReadLock() error { // CurrentGeneration returns the name of the generation saved to the "generation" // file in the meta data directory. 
Returns empty string if none exists. func (db *DB) CurrentGeneration() (string, error) { - buf, err := ioutil.ReadFile(db.GenerationNamePath()) + buf, err := os.ReadFile(db.GenerationNamePath()) if os.IsNotExist(err) { return "", nil } else if err != nil { return "", err } - // TODO: Verify if generation directory exists. If not, delete name file. - generation := strings.TrimSpace(string(buf)) if len(generation) != GenerationNameLen { return "", nil @@ -654,7 +815,7 @@ func (db *DB) CurrentGeneration() (string, error) { // createGeneration starts a new generation by creating the generation // directory, snapshotting to each replica, and updating the current // generation name. -func (db *DB) createGeneration() (string, error) { +func (db *DB) createGeneration(ctx context.Context) (string, error) { // Generate random generation hex name. buf := make([]byte, GenerationNameLen/2) _, _ = rand.New(rand.NewSource(time.Now().UnixNano())).Read(buf) @@ -667,7 +828,7 @@ func (db *DB) createGeneration() (string, error) { } // Initialize shadow WAL with copy of header. - if _, err := db.initShadowWALFile(db.ShadowWALPath(generation, 0)); err != nil { + if err := db.initShadowWALIndex(ctx, Pos{Generation: generation}); err != nil { return "", fmt.Errorf("initialize shadow wal: %w", err) } @@ -677,7 +838,7 @@ func (db *DB) createGeneration() (string, error) { if db.fileInfo != nil { mode = db.fileInfo.Mode() } - if err := ioutil.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil { + if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil { return "", fmt.Errorf("write generation temp file: %w", err) } uid, gid := internal.Fileinfo(db.fileInfo) @@ -687,7 +848,7 @@ func (db *DB) createGeneration() (string, error) { } // Remove old generations. - if err := db.clean(); err != nil { + if err := db.clean(db.ctx); err != nil { return "", err } @@ -703,10 +864,24 @@ func (db *DB) Sync(ctx context.Context) (err error) { if err := db.init(); err != nil { return err } else if db.db == nil { - Tracef("%s: sync: no database found", db.path) return nil } + // Ensure the cached position exists. + if db.pos.IsZero() { + if err := db.invalidate(ctx); err != nil { + return fmt.Errorf("invalidate: %w", err) + } + } + origPos := db.pos + + // If sync fails, reset position & cache. + defer func() { + if err != nil { + db.reset() + } + }() + // Track total sync metrics. t := time.Now() defer func() { @@ -729,73 +904,73 @@ func (db *DB) Sync(ctx context.Context) (err error) { if err != nil { return fmt.Errorf("cannot verify wal state: %w", err) } - Tracef("%s: sync: info=%#v", db.path, info) - - // Track if anything in the shadow WAL changes and then notify at the end. - changed := info.walSize != info.shadowWALSize || info.restart || info.reason != "" // If we are unable to verify the WAL state then we start a new generation. if info.reason != "" { // Start new generation & notify user via log message. - if info.generation, err = db.createGeneration(); err != nil { + if info.generation, err = db.createGeneration(ctx); err != nil { return fmt.Errorf("create generation: %w", err) } - log.Printf("%s: sync: new generation %q, %s", db.path, info.generation, info.reason) + db.Logger.Printf("sync: new generation %q, %s", info.generation, info.reason) // Clear shadow wal info. - info.shadowWALPath = db.ShadowWALPath(info.generation, 0) - info.shadowWALSize = WALHeaderSize info.restart = false info.reason = "" - } // Synchronize real WAL with current shadow WAL. 
- newWALSize, err := db.syncWAL(info) - if err != nil { - return fmt.Errorf("sync wal: %w", err) + if err := db.copyToShadowWAL(ctx); err != nil { + return fmt.Errorf("cannot copy to shadow wal: %w", err) + } + + // If we are at the end of the WAL file, start a new index. + if info.restart { + // Move to beginning of next index. + pos := db.pos.Truncate() + pos.Index++ + + // Attempt to restart WAL from beginning of new index. + // Position is only committed to cache if successful. + if err := db.initShadowWALIndex(ctx, pos); err != nil { + return fmt.Errorf("cannot init shadow wal: pos=%s err=%w", pos, err) + } } // If WAL size is great than max threshold, force checkpoint. // If WAL size is greater than min threshold, attempt checkpoint. var checkpoint bool checkpointMode := CheckpointModePassive - if db.MaxCheckpointPageN > 0 && newWALSize >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) { + if db.MaxCheckpointPageN > 0 && db.pos.Offset >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) { checkpoint, checkpointMode = true, CheckpointModeRestart - } else if newWALSize >= calcWALSize(db.pageSize, db.MinCheckpointPageN) { + } else if db.pos.Offset >= calcWALSize(db.pageSize, db.MinCheckpointPageN) { checkpoint = true - } else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && newWALSize > calcWALSize(db.pageSize, 1) { + } else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && db.pos.Offset > calcWALSize(db.pageSize, 1) { checkpoint = true } // Issue the checkpoint. if checkpoint { - changed = true - if err := db.checkpoint(ctx, info.generation, checkpointMode); err != nil { return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err) } } // Clean up any old files. - if err := db.clean(); err != nil { + if err := db.clean(ctx); err != nil { return fmt.Errorf("cannot clean: %w", err) } // Compute current index and total shadow WAL size. // This is only for metrics so we ignore any errors that occur. - index, size, _ := db.CurrentShadowWALIndex(info.generation) - db.shadowWALIndexGauge.Set(float64(index)) - db.shadowWALSizeGauge.Set(float64(size)) + db.shadowWALIndexGauge.Set(float64(db.pos.Index)) + db.shadowWALSizeGauge.Set(float64(db.pos.Offset)) // Notify replicas of WAL changes. - if changed { + if db.pos != origPos { close(db.notify) db.notify = make(chan struct{}) } - Tracef("%s: sync: ok", db.path) - return nil } @@ -838,67 +1013,36 @@ func (db *DB) verify() (info syncInfo, err error) { if err != nil { return info, err } - info.walSize = frameAlign(fi.Size(), db.pageSize) + walSize := fi.Size() info.walModTime = fi.ModTime() - db.walSizeGauge.Set(float64(fi.Size())) + db.walSizeGauge.Set(float64(walSize)) - // Open shadow WAL to copy append to. - index, _, err := db.CurrentShadowWALIndex(info.generation) - if err != nil { - return info, fmt.Errorf("cannot determine shadow WAL index: %w", err) - } else if index >= MaxIndex { + // Verify the index is not out of bounds. + if db.pos.Index >= MaxIndex { info.reason = "max index exceeded" return info, nil } - info.shadowWALPath = db.ShadowWALPath(generation, index) - - // Determine shadow WAL current size. - fi, err = os.Stat(info.shadowWALPath) - if os.IsNotExist(err) { - info.reason = "no shadow wal" - return info, nil - } else if err != nil { - return info, err - } - info.shadowWALSize = frameAlign(fi.Size(), db.pageSize) - - // Exit if shadow WAL does not contain a full header. 
- if info.shadowWALSize < WALHeaderSize { - info.reason = "short shadow wal" - return info, nil - } - // If shadow WAL is larger than real WAL then the WAL has been truncated - // so we cannot determine our last state. - if info.shadowWALSize > info.walSize { + // If shadow WAL position is larger than real WAL then the WAL has been + // truncated so we cannot determine our last state. + if db.pos.Offset > walSize { info.reason = "wal truncated by another process" return info, nil } // Compare WAL headers. Start a new shadow WAL if they are mismatched. - if hdr0, err := readWALHeader(db.WALPath()); err != nil { + if hdr, err := readWALHeader(db.WALPath()); err != nil { return info, fmt.Errorf("cannot read wal header: %w", err) - } else if hdr1, err := readWALHeader(info.shadowWALPath); err != nil { - return info, fmt.Errorf("cannot read shadow wal header: %w", err) - } else if !bytes.Equal(hdr0, hdr1) { - info.restart = !bytes.Equal(hdr0, hdr1) - } - - // If we only have a header then ensure header matches. - // Otherwise we need to start a new generation. - if info.shadowWALSize == WALHeaderSize && info.restart { - info.reason = "wal header only, mismatched" - return info, nil + } else if !bytes.Equal(hdr, db.hdr) { + info.restart = true } - // Verify last page synced still matches. - if info.shadowWALSize > WALHeaderSize { - offset := info.shadowWALSize - int64(db.pageSize+WALFrameHeaderSize) - if buf0, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { + // Verify last frame synced still matches. + if db.pos.Offset > WALHeaderSize { + offset := db.pos.Offset - int64(db.pageSize+WALFrameHeaderSize) + if frame, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { return info, fmt.Errorf("cannot read last synced wal page: %w", err) - } else if buf1, err := readWALFileAt(info.shadowWALPath, offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { - return info, fmt.Errorf("cannot read last synced shadow wal page: %w", err) - } else if !bytes.Equal(buf0, buf1) { + } else if !bytes.Equal(frame, db.frame) { info.reason = "wal overwritten by another process" return info, nil } @@ -908,254 +1052,351 @@ func (db *DB) verify() (info syncInfo, err error) { } type syncInfo struct { - generation string // generation name - dbModTime time.Time // last modified date of real DB file - walSize int64 // size of real WAL file - walModTime time.Time // last modified date of real WAL file - shadowWALPath string // name of last shadow WAL file - shadowWALSize int64 // size of last shadow WAL file - restart bool // if true, real WAL header does not match shadow WAL - reason string // if non-blank, reason for sync failure -} - -// syncWAL copies pending bytes from the real WAL to the shadow WAL. -func (db *DB) syncWAL(info syncInfo) (newSize int64, err error) { - // Copy WAL starting from end of shadow WAL. Exit if no new shadow WAL needed. - newSize, err = db.copyToShadowWAL(info.shadowWALPath) - if err != nil { - return newSize, fmt.Errorf("cannot copy to shadow wal: %w", err) - } else if !info.restart { - return newSize, nil // If no restart required, exit. - } - - // Parse index of current shadow WAL file. - dir, base := filepath.Split(info.shadowWALPath) - index, err := parseWALPath(base) - if err != nil { - return 0, fmt.Errorf("cannot parse shadow wal filename: %s", base) - } - - // Start a new shadow WAL file with next index. 
- newShadowWALPath := filepath.Join(dir, formatWALPath(index+1)) - newSize, err = db.initShadowWALFile(newShadowWALPath) - if err != nil { - return 0, fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) - } - return newSize, nil + generation string // generation name + dbModTime time.Time // last modified date of real DB file + walModTime time.Time // last modified date of real WAL file + restart bool // if true, real WAL header does not match shadow WAL + reason string // if non-blank, reason for sync failure } -func (db *DB) initShadowWALFile(filename string) (int64, error) { +func (db *DB) initShadowWALIndex(ctx context.Context, pos Pos) error { + assert(pos.Offset == 0, "must init shadow wal index with zero offset") + hdr, err := readWALHeader(db.WALPath()) if err != nil { - return 0, fmt.Errorf("read header: %w", err) + return fmt.Errorf("read header: %w", err) } // Determine byte order for checksumming from header magic. - bo, err := headerByteOrder(hdr) + byteOrder, err := headerByteOrder(hdr) if err != nil { - return 0, err + return err } // Verify checksum. - s0 := binary.BigEndian.Uint32(hdr[24:]) - s1 := binary.BigEndian.Uint32(hdr[28:]) - if v0, v1 := Checksum(bo, 0, 0, hdr[:24]); v0 != s0 || v1 != s1 { - return 0, fmt.Errorf("invalid header checksum: (%x,%x) != (%x,%x)", v0, v1, s0, s1) + chksum0 := binary.BigEndian.Uint32(hdr[24:]) + chksum1 := binary.BigEndian.Uint32(hdr[28:]) + if v0, v1 := Checksum(byteOrder, 0, 0, hdr[:24]); v0 != chksum0 || v1 != chksum1 { + return fmt.Errorf("invalid header checksum: (%x,%x) != (%x,%x)", v0, v1, chksum0, chksum1) } - // Write header to new WAL shadow file. - mode := os.FileMode(0600) - if fi := db.fileInfo; fi != nil { - mode = fi.Mode() + // Compress header to LZ4. + var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(hdr); err != nil { + return err + } else if err := zw.Close(); err != nil { + return err } - if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil { - return 0, err - } else if err := ioutil.WriteFile(filename, hdr, mode); err != nil { - return 0, err + + // Write header segment to shadow WAL & update position. + if err := db.writeWALSegment(ctx, pos, &buf); err != nil { + return fmt.Errorf("write shadow wal header: %w", err) } - uid, gid := internal.Fileinfo(db.fileInfo) - _ = os.Chown(filename, uid, gid) + pos.Offset += int64(len(hdr)) + db.pos = pos + + // Save header, salt & checksum to cache. + db.hdr = hdr + db.salt0 = binary.BigEndian.Uint32(hdr[16:]) + db.salt1 = binary.BigEndian.Uint32(hdr[20:]) + db.chksum0, db.chksum1 = chksum0, chksum1 + db.byteOrder = byteOrder // Copy as much shadow WAL as available. 
- newSize, err := db.copyToShadowWAL(filename) - if err != nil { - return 0, fmt.Errorf("cannot copy to new shadow wal: %w", err) + if err := db.copyToShadowWAL(ctx); err != nil { + return fmt.Errorf("cannot copy to new shadow wal: %w", err) } - return newSize, nil + return nil } -func (db *DB) copyToShadowWAL(filename string) (newSize int64, err error) { - Tracef("%s: copy-shadow: %s", db.path, filename) +func (db *DB) copyToShadowWAL(ctx context.Context) error { + pos := db.pos + assert(!pos.IsZero(), "zero pos for wal copy") r, err := os.Open(db.WALPath()) if err != nil { - return 0, err + return err } defer r.Close() - w, err := os.OpenFile(filename, os.O_RDWR, 0666) - if err != nil { - return 0, err - } - defer w.Close() - - fi, err := w.Stat() - if err != nil { - return 0, err - } - origSize := frameAlign(fi.Size(), db.pageSize) - - // Read shadow WAL header to determine byte order for checksum & salt. - hdr := make([]byte, WALHeaderSize) - if _, err := io.ReadFull(w, hdr); err != nil { - return 0, fmt.Errorf("read header: %w", err) - } - hsalt0 := binary.BigEndian.Uint32(hdr[16:]) - hsalt1 := binary.BigEndian.Uint32(hdr[20:]) - - bo, err := headerByteOrder(hdr) - if err != nil { - return 0, err - } + // Write to a temporary WAL segment file. + tempFilename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.tmp") + defer os.Remove(tempFilename) - // Read previous checksum. - chksum0, chksum1, err := readLastChecksumFrom(w, db.pageSize) + f, err := internal.CreateFile(tempFilename, db.fileInfo) if err != nil { - return 0, fmt.Errorf("last checksum: %w", err) + return err } + defer f.Close() // Seek to correct position on real wal. - if _, err := r.Seek(origSize, io.SeekStart); err != nil { - return 0, fmt.Errorf("real wal seek: %w", err) - } else if _, err := w.Seek(origSize, io.SeekStart); err != nil { - return 0, fmt.Errorf("shadow wal seek: %w", err) + if _, err := r.Seek(pos.Offset, io.SeekStart); err != nil { + return fmt.Errorf("real wal seek: %w", err) } - // Read through WAL from last position to find the page of the last - // committed transaction. + // The high water mark (HWM) tracks the position & checksum of the position + // of the last committed transaction frame. + hwm := struct { + pos Pos + chksum0 uint32 + chksum1 uint32 + frame []byte + }{db.pos, db.chksum0, db.chksum1, make([]byte, db.pageSize+WALFrameHeaderSize)} + + // Copy from last position in real WAL to the last committed transaction. frame := make([]byte, db.pageSize+WALFrameHeaderSize) - var buf bytes.Buffer - offset := origSize - lastCommitSize := origSize + chksum0, chksum1 := db.chksum0, db.chksum1 for { // Read next page from WAL file. if _, err := io.ReadFull(r, frame); err == io.EOF || err == io.ErrUnexpectedEOF { - Tracef("%s: copy-shadow: break %s @ %d; err=%s", db.path, filename, offset, err) break // end of file or partial page } else if err != nil { - return 0, fmt.Errorf("read wal: %w", err) + return fmt.Errorf("read wal: %w", err) } // Read frame salt & compare to header salt. Stop reading on mismatch. salt0 := binary.BigEndian.Uint32(frame[8:]) salt1 := binary.BigEndian.Uint32(frame[12:]) - if salt0 != hsalt0 || salt1 != hsalt1 { - Tracef("%s: copy-shadow: break: salt mismatch", db.path) + if salt0 != db.salt0 || salt1 != db.salt1 { break } // Verify checksum of page is valid. 
fchksum0 := binary.BigEndian.Uint32(frame[16:]) fchksum1 := binary.BigEndian.Uint32(frame[20:]) - chksum0, chksum1 = Checksum(bo, chksum0, chksum1, frame[:8]) // frame header - chksum0, chksum1 = Checksum(bo, chksum0, chksum1, frame[24:]) // frame data + chksum0, chksum1 = Checksum(db.byteOrder, chksum0, chksum1, frame[:8]) // frame header + chksum0, chksum1 = Checksum(db.byteOrder, chksum0, chksum1, frame[24:]) // frame data if chksum0 != fchksum0 || chksum1 != fchksum1 { - Tracef("%s: copy shadow: checksum mismatch, skipping: offset=%d (%x,%x) != (%x,%x)", db.path, offset, chksum0, chksum1, fchksum0, fchksum1) break } // Add page to the new size of the shadow WAL. - buf.Write(frame) + if _, err := f.Write(frame); err != nil { + return fmt.Errorf("write temp shadow wal segment: %w", err) + } - Tracef("%s: copy-shadow: ok %s offset=%d salt=%x %x", db.path, filename, offset, salt0, salt1) - offset += int64(len(frame)) + pos.Offset += int64(len(frame)) // Flush to shadow WAL if commit record. newDBSize := binary.BigEndian.Uint32(frame[4:]) if newDBSize != 0 { - if _, err := buf.WriteTo(w); err != nil { - return 0, fmt.Errorf("write shadow wal: %w", err) - } - buf.Reset() - lastCommitSize = offset + hwm.pos = pos + hwm.chksum0, hwm.chksum1 = chksum0, chksum1 + copy(hwm.frame, frame) } } - // Sync & close. - if err := w.Sync(); err != nil { - return 0, err - } else if err := w.Close(); err != nil { - return 0, err + // If no WAL writes found, exit. + if db.pos == hwm.pos { + return nil + } + + walByteN := hwm.pos.Offset - db.pos.Offset + + // Move to beginning of temporary file. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("temp file seek: %w", err) + } + + // Copy temporary file to a pipe while compressing the data. + // Only read up to the number of bytes from the original position to the HWM. + pr, pw := io.Pipe() + go func() { + zw := lz4.NewWriter(pw) + if _, err := io.Copy(zw, &io.LimitedReader{R: f, N: walByteN}); err != nil { + pw.CloseWithError(err) + } else if err := zw.Close(); err != nil { + pw.CloseWithError(err) + } + pw.Close() + }() + + // Write a new, compressed segment via pipe. + if err := db.writeWALSegment(ctx, db.pos, pr); err != nil { + return fmt.Errorf("write wal segment: pos=%s err=%w", db.pos, err) + } + + // Update the position & checksum on success. + db.pos = hwm.pos + db.chksum0, db.chksum1 = hwm.chksum0, hwm.chksum1 + db.frame = hwm.frame + + // Close & remove temporary file. + if err := f.Close(); err != nil { + return err + } else if err := os.Remove(tempFilename); err != nil { + return err } // Track total number of bytes written to WAL. - db.totalWALBytesCounter.Add(float64(lastCommitSize - origSize)) + db.totalWALBytesCounter.Add(float64(walByteN)) - return lastCommitSize, nil + return nil } -// ShadowWALReader opens a reader for a shadow WAL file at a given position. -// If the reader is at the end of the file, it attempts to return the next file. -// -// The caller should check Pos() & Size() on the returned reader to check offset. -func (db *DB) ShadowWALReader(pos Pos) (r *ShadowWALReader, err error) { - // Fetch reader for the requested position. Return if it has data. - r, err = db.shadowWALReader(pos) +// WALSegmentReader returns a reader for a section of WAL data at the given position. +// Returns os.ErrNotExist if no matching index/offset is found. 
+func (db *DB) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { + if pos.Generation == "" { + return nil, fmt.Errorf("generation required") + } + return os.Open(filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4")) +} + +// writeWALSegment writes LZ4 compressed data from rd into a file on disk. +func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error { + if pos.Generation == "" { + return fmt.Errorf("generation required") + } + filename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4") + + // Ensure parent directory exists. + if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil { + return err + } + + // Write WAL segment to temporary file next to destination path. + f, err := internal.CreateFile(filename+".tmp", db.fileInfo) if err != nil { - return nil, err - } else if r.N() > 0 { - return r, nil - } else if err := r.Close(); err != nil { // no data, close, try next - return nil, err + return err } + defer f.Close() - // Otherwise attempt to read the start of the next WAL file. - pos.Index, pos.Offset = pos.Index+1, 0 + if _, err := io.Copy(f, rd); err != nil { + return err + } else if err := f.Sync(); err != nil { + return err + } else if err := f.Close(); err != nil { + return err + } - r, err = db.shadowWALReader(pos) - if os.IsNotExist(err) { - return nil, io.EOF + // Move WAL segment to final path when it has been written & synced to disk. + if err := os.Rename(filename+".tmp", filename); err != nil { + return err } - return r, err -} -// shadowWALReader opens a file reader for a shadow WAL file at a given position. -func (db *DB) shadowWALReader(pos Pos) (r *ShadowWALReader, err error) { - filename := db.ShadowWALPath(pos.Generation, pos.Index) + return nil +} - f, err := os.Open(filename) - if err != nil { +// WALSegments returns an iterator over all available WAL files for a generation. +func (db *DB) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) { + ents, err := os.ReadDir(db.ShadowWALDir(generation)) + if os.IsNotExist(err) { + return NewWALSegmentInfoSliceIterator(nil), nil + } else if err != nil { return nil, err } - // Ensure file is closed if any error occurs. - defer func() { + // Iterate over every file and convert to metadata. + indexes := make([]int, 0, len(ents)) + for _, ent := range ents { + index, err := ParseIndex(ent.Name()) if err != nil { - f.Close() + continue } - }() + indexes = append(indexes, index) + } - // Fetch frame-aligned file size and ensure requested offset is not past EOF. - fi, err := f.Stat() - if err != nil { - return nil, err + sort.Ints(indexes) + + return newShadowWALSegmentIterator(db, generation, indexes), nil +} + +type shadowWALSegmentIterator struct { + db *DB + generation string + indexes []int + + infos []WALSegmentInfo + err error +} + +func newShadowWALSegmentIterator(db *DB, generation string, indexes []int) *shadowWALSegmentIterator { + return &shadowWALSegmentIterator{ + db: db, + generation: generation, + indexes: indexes, } +} + +func (itr *shadowWALSegmentIterator) Close() (err error) { + return itr.err +} - fileSize := frameAlign(fi.Size(), db.pageSize) - if pos.Offset > fileSize { - return nil, fmt.Errorf("wal reader offset too high: %d > %d", pos.Offset, fi.Size()) +func (itr *shadowWALSegmentIterator) Next() bool { + // Exit if an error has already occurred. 
+ if itr.err != nil { + return false } - // Move file handle to offset position. - if _, err := f.Seek(pos.Offset, io.SeekStart); err != nil { - return nil, err + for { + // Move to the next segment in cache, if available. + if len(itr.infos) > 1 { + itr.infos = itr.infos[1:] + return true + } + itr.infos = itr.infos[:0] // otherwise clear infos + + // If no indexes remain, stop iteration. + if len(itr.indexes) == 0 { + return false + } + + // Read segments into a cache for the current index. + index := itr.indexes[0] + itr.indexes = itr.indexes[1:] + f, err := os.Open(filepath.Join(itr.db.ShadowWALDir(itr.generation), FormatIndex(index))) + if err != nil { + itr.err = err + return false + } + defer func() { _ = f.Close() }() + + fis, err := f.Readdir(-1) + if err != nil { + itr.err = err + return false + } else if err := f.Close(); err != nil { + itr.err = err + return false + } + for _, fi := range fis { + filename := filepath.Base(fi.Name()) + if fi.IsDir() { + continue + } + + offset, err := ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + if err != nil { + continue + } + + itr.infos = append(itr.infos, WALSegmentInfo{ + Generation: itr.generation, + Index: index, + Offset: offset, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + }) + } + + // Ensure segments are sorted within index. + sort.Sort(WALSegmentInfoSlice(itr.infos)) + + if len(itr.infos) > 0 { + return true + } } +} + +func (itr *shadowWALSegmentIterator) Err() error { return itr.err } - return &ShadowWALReader{ - f: f, - n: fileSize - pos.Offset, - pos: pos, - }, nil +func (itr *shadowWALSegmentIterator) WALSegment() WALSegmentInfo { + if len(itr.infos) == 0 { + return WALSegmentInfo{} + } + return itr.infos[0] } // frameAlign returns a frame-aligned offset. @@ -1173,40 +1414,6 @@ func frameAlign(offset int64, pageSize int) int64 { return (frameN * frameSize) + WALHeaderSize } -// ShadowWALReader represents a reader for a shadow WAL file that tracks WAL position. -type ShadowWALReader struct { - f *os.File - n int64 - pos Pos -} - -// Name returns the filename of the underlying file. -func (r *ShadowWALReader) Name() string { return r.f.Name() } - -// Close closes the underlying WAL file handle. -func (r *ShadowWALReader) Close() error { return r.f.Close() } - -// N returns the remaining bytes in the reader. -func (r *ShadowWALReader) N() int64 { return r.n } - -// Pos returns the current WAL position. -func (r *ShadowWALReader) Pos() Pos { return r.pos } - -// Read reads bytes into p, updates the position, and returns the bytes read. -// Returns io.EOF at the end of the available section of the WAL. -func (r *ShadowWALReader) Read(p []byte) (n int, err error) { - if r.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > r.n { - p = p[0:r.n] - } - n, err = r.f.Read(p) - r.n -= int64(n) - r.pos.Offset += int64(n) - return n, err -} - // SQLite WAL constants const ( WALHeaderChecksumOffset = 24 @@ -1248,11 +1455,6 @@ func (db *DB) Checkpoint(ctx context.Context, mode string) (err error) { // checkpointAndInit performs a checkpoint on the WAL file and initializes a // new shadow WAL file. func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { - shadowWALPath, err := db.CurrentShadowWALPath(generation) - if err != nil { - return err - } - // Read WAL header before checkpoint to check if it has been restarted. 
hdr, err := readWALHeader(db.WALPath()) if err != nil { @@ -1260,7 +1462,7 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { } // Copy shadow WAL before checkpoint to copy as much as possible. - if _, err := db.copyToShadowWAL(shadowWALPath); err != nil { + if err := db.copyToShadowWAL(ctx); err != nil { return fmt.Errorf("cannot copy to end of shadow wal before checkpoint: %w", err) } @@ -1295,20 +1497,14 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { } // Copy the end of the previous WAL before starting a new shadow WAL. - if _, err := db.copyToShadowWAL(shadowWALPath); err != nil { + if err := db.copyToShadowWAL(ctx); err != nil { return fmt.Errorf("cannot copy to end of shadow wal: %w", err) } - // Parse index of current shadow WAL file. - index, err := parseWALPath(shadowWALPath) - if err != nil { - return fmt.Errorf("cannot parse shadow wal filename: %s", shadowWALPath) - } - // Start a new shadow WAL file with next index. - newShadowWALPath := filepath.Join(filepath.Dir(shadowWALPath), formatWALPath(index+1)) - if _, err := db.initShadowWALFile(newShadowWALPath); err != nil { - return fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) + pos := Pos{Generation: db.pos.Generation, Index: db.pos.Index + 1} + if err := db.initShadowWALIndex(ctx, pos); err != nil { + return fmt.Errorf("cannot init shadow wal file: pos=%s err=%w", pos, err) } // Release write lock before checkpointing & exiting. @@ -1354,11 +1550,11 @@ func (db *DB) execCheckpoint(mode string) (err error) { if err := db.db.QueryRow(rawsql).Scan(&row[0], &row[1], &row[2]); err != nil { return err } - Tracef("%s: checkpoint: mode=%v (%d,%d,%d)", db.path, mode, row[0], row[1], row[2]) + db.Logger.Printf("checkpoint(%s): [%d,%d,%d]", mode, row[0], row[1], row[2]) // Reacquire the read lock immediately after the checkpoint. if err := db.acquireReadLock(); err != nil { - return fmt.Errorf("release read lock: %w", err) + return fmt.Errorf("reacquire read lock: %w", err) } return nil @@ -1379,7 +1575,7 @@ func (db *DB) monitor() { // Sync the database to the shadow WAL. if err := db.Sync(db.ctx); err != nil && !errors.Is(err, context.Canceled) { - log.Printf("%s: sync error: %s", db.path, err) + db.Logger.Printf("sync error: %s", err) } } } @@ -1467,10 +1663,7 @@ func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) { // Obtain current position. Clear the offset since we are only reading the // DB and not applying the current WAL. - pos, err := db.Pos() - if err != nil { - return 0, pos, err - } + pos := db.pos pos.Offset = 0 // Seek to the beginning of the db file descriptor and checksum whole file. 
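With this patch the shadow WAL under the meta directory mirrors the replica layout: one directory per index, one LZ4-compressed file per segment, named by the segment's starting offset. A sketch of the path composition that db.go now builds inline (shadowWALSegmentPath is a hypothetical helper added only for illustration; FormatIndex renders a zero-padded hex index, e.g. index 1000 -> "000003e8", and FormatOffset does the same for the segment's starting offset):

// shadowWALSegmentPath shows how shadow WAL segment paths are composed.
// Hypothetical helper; writeWALSegment and WALSegmentReader build the same path inline.
func (db *DB) shadowWALSegmentPath(generation string, index int, offset int64) string {
	return filepath.Join(
		db.ShadowWALDir(generation),     // e.g. /tmp/db-litestream/generations/<generation>/wal
		FormatIndex(index),              // hex-encoded index directory
		FormatOffset(offset)+".wal.lz4", // hex-encoded starting offset of the segment
	)
}

Segments are first written to the same path with a ".tmp" suffix, fsynced, and then renamed into place, so readers never observe a partially written segment.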
diff --git a/db_test.go b/db_test.go index b7eb54b0..220f7e67 100644 --- a/db_test.go +++ b/db_test.go @@ -3,7 +3,6 @@ package litestream_test import ( "context" "database/sql" - "io/ioutil" "os" "path/filepath" "strings" @@ -30,13 +29,13 @@ func TestDB_WALPath(t *testing.T) { func TestDB_MetaPath(t *testing.T) { t.Run("Absolute", func(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want { + if got, want := db.MetaPath(), `/tmp/db-litestream`; got != want { t.Fatalf("MetaPath()=%v, want %v", got, want) } }) t.Run("Relative", func(t *testing.T) { db := litestream.NewDB("db") - if got, want := db.MetaPath(), `.db-litestream`; got != want { + if got, want := db.MetaPath(), `db-litestream`; got != want { t.Fatalf("MetaPath()=%v, want %v", got, want) } }) @@ -44,32 +43,25 @@ func TestDB_MetaPath(t *testing.T) { func TestDB_GenerationNamePath(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want { + if got, want := db.GenerationNamePath(), `/tmp/db-litestream/generation`; got != want { t.Fatalf("GenerationNamePath()=%v, want %v", got, want) } } func TestDB_GenerationPath(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want { + if got, want := db.GenerationPath("xxxx"), `/tmp/db-litestream/generations/xxxx`; got != want { t.Fatalf("GenerationPath()=%v, want %v", got, want) } } func TestDB_ShadowWALDir(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want { + if got, want := db.ShadowWALDir("xxxx"), `/tmp/db-litestream/generations/xxxx/wal`; got != want { t.Fatalf("ShadowWALDir()=%v, want %v", got, want) } } -func TestDB_ShadowWALPath(t *testing.T) { - db := litestream.NewDB("/tmp/db") - if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want { - t.Fatalf("ShadowWALPath()=%v, want %v", got, want) - } -} - // Ensure we can check the last modified time of the real database and its WAL. func TestDB_UpdatedAt(t *testing.T) { t.Run("ErrNotExist", func(t *testing.T) { @@ -195,9 +187,7 @@ func TestDB_Sync(t *testing.T) { } // Ensure position now available. - if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos.Generation == "" { + if pos := db.Pos(); pos.Generation == "" { t.Fatal("expected generation") } else if got, want := pos.Index, 0; got != want { t.Fatalf("pos.Index=%v, want %v", got, want) @@ -221,10 +211,7 @@ func TestDB_Sync(t *testing.T) { t.Fatal(err) } - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Insert into table. if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil { @@ -234,9 +221,7 @@ func TestDB_Sync(t *testing.T) { // Sync to ensure position moves forward one page. if err := db.Sync(context.Background()); err != nil { t.Fatal(err) - } else if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation != pos1.Generation { + } else if pos1 := db.Pos(); pos0.Generation != pos1.Generation { t.Fatal("expected the same generation") } else if got, want := pos1.Index, pos0.Index; got != want { t.Fatalf("Index=%v, want %v", got, want) @@ -256,10 +241,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. 
- pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Checkpoint & fully close which should close WAL file. if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil { @@ -285,9 +267,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { + if pos1 := db.Pos(); pos0.Generation == pos1.Generation { t.Fatal("expected new generation after truncation") } }) @@ -308,10 +288,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Fully close which should close WAL file. if err := db.Close(); err != nil { @@ -344,190 +321,98 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { + if pos1 := db.Pos(); pos0.Generation == pos1.Generation { t.Fatal("expected new generation after truncation") } }) - // Ensure DB can handle a mismatched header-only and start new generation. - t.Run("WALHeaderMismatch", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Grab initial position & close. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if err := db.Close(); err != nil { - t.Fatal(err) - } - - // Read existing file, update header checksum, and write back only header - // to simulate a header with a mismatched checksum. - shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index) - if buf, err := ioutil.ReadFile(shadowWALPath); err != nil { - t.Fatal(err) - } else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil { - t.Fatal(err) - } - - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Verify a new generation was started. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } - }) - - // Ensure DB can handle partial shadow WAL header write. - t.Run("PartialShadowWALHeader", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } - - // Close & truncate shadow WAL to simulate a partial header write. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil { - t.Fatal(err) - } - - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Verify a new generation was started. 
- if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } - }) - - // Ensure DB can handle partial shadow WAL writes. - t.Run("PartialShadowWALFrame", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } - - // Obtain current shadow WAL size. - fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)) - if err != nil { - t.Fatal(err) - } - - // Close & truncate shadow WAL to simulate a partial frame write. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil { - t.Fatal(err) - } + // TODO: Fix test to check for header mismatch + /* + // Ensure DB can handle a mismatched header-only and start new generation. + t.Run("WALHeaderMismatch", func(t *testing.T) { + db, sqldb := MustOpenDBs(t) + defer MustCloseDBs(t, db, sqldb) - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Execute a query to force a write to the WAL and then sync. + if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { + t.Fatal(err) + } else if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Verify same generation is kept. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos1, pos0; got != want { - t.Fatalf("Pos()=%s want %s", got, want) - } + // Grab initial position & close. + pos0 := db.Pos() + if err := db.Close(); err != nil { + t.Fatal(err) + } - // Ensure shadow WAL has recovered. - if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { - t.Fatal(err) - } else if got, want := fi0.Size(), fi.Size(); got != want { - t.Fatalf("Size()=%v, want %v", got, want) - } - }) + // Read existing file, update header checksum, and write back only header + // to simulate a header with a mismatched checksum. + shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index) + if buf, err := os.ReadFile(shadowWALPath); err != nil { + t.Fatal(err) + } else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil { + t.Fatal(err) + } - // Ensure DB can handle a generation directory with a missing shadow WAL. - t.Run("NoShadowWAL", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) + // Reopen managed database & ensure sync will still work. + db = MustOpenDBAt(t, db.Path()) + defer MustCloseDB(t, db) + if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Verify a new generation was started. + if pos1, err := db.Pos(); err != nil { + t.Fatal(err) + } else if pos0.Generation == pos1.Generation { + t.Fatal("expected new generation") + } + }) + */ + + // TODO: Fix test for segmented shadow WAL. 
+ /* + // Ensure DB can handle a generation directory with a missing shadow WAL. + t.Run("NoShadowWAL", func(t *testing.T) { + db, sqldb := MustOpenDBs(t) + defer MustCloseDBs(t, db, sqldb) + + // Execute a query to force a write to the WAL and then sync. + if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { + t.Fatal(err) + } else if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() - // Close & delete shadow WAL to simulate dir created but not WAL. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { - t.Fatal(err) - } + // Close & delete shadow WAL to simulate dir created but not WAL. + if err := db.Close(); err != nil { + t.Fatal(err) + } else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { + t.Fatal(err) + } - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Reopen managed database & ensure sync will still work. + db = MustOpenDBAt(t, db.Path()) + defer MustCloseDB(t, db) + if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Verify new generation created but index/offset the same. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } else if got, want := pos1.Index, pos0.Index; got != want { - t.Fatalf("Index=%v want %v", got, want) - } else if got, want := pos1.Offset, pos0.Offset; got != want { - t.Fatalf("Offset=%v want %v", got, want) - } - }) + // Verify new generation created but index/offset the same. + if pos1, err := db.Pos(); err != nil { + t.Fatal(err) + } else if pos0.Generation == pos1.Generation { + t.Fatal("expected new generation") + } else if got, want := pos1.Index, pos0.Index; got != want { + t.Fatalf("Index=%v want %v", got, want) + } else if got, want := pos1.Offset, pos0.Offset; got != want { + t.Fatalf("Offset=%v want %v", got, want) + } + }) + */ // Ensure DB checkpoints after minimum number of pages. t.Run("MinCheckpointPageN", func(t *testing.T) { @@ -554,9 +439,7 @@ func TestDB_Sync(t *testing.T) { } // Ensure position is now on the second index. - if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 1; got != want { + if got, want := db.Pos().Index, 1; got != want { t.Fatalf("Index=%v, want %v", got, want) } }) @@ -584,9 +467,7 @@ func TestDB_Sync(t *testing.T) { } // Ensure position is now on the second index. - if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 1; got != want { + if got, want := db.Pos().Index, 1; got != want { t.Fatalf("Index=%v, want %v", got, want) } }) diff --git a/file/replica_client.go b/file/replica_client.go index 8d0da749..ef7d7b91 100644 --- a/file/replica_client.go +++ b/file/replica_client.go @@ -408,11 +408,6 @@ func (itr *walSegmentIterator) Next() bool { } itr.infos = itr.infos[:0] // otherwise clear infos - // Move to the next index unless this is the first time initializing. - if itr.infos != nil && len(itr.indexes) > 0 { - itr.indexes = itr.indexes[1:] - } - // If no indexes remain, stop iteration. 
if len(itr.indexes) == 0 { return false @@ -420,6 +415,7 @@ func (itr *walSegmentIterator) Next() bool { // Read segments into a cache for the current index. index := itr.indexes[0] + itr.indexes = itr.indexes[1:] f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index))) if err != nil { itr.err = err @@ -431,7 +427,11 @@ func (itr *walSegmentIterator) Next() bool { if err != nil { itr.err = err return false + } else if err := f.Close(); err != nil { + itr.err = err + return false } + for _, fi := range fis { filename := filepath.Base(fi.Name()) if fi.IsDir() { @@ -452,6 +452,9 @@ func (itr *walSegmentIterator) Next() bool { }) } + // Ensure segments are sorted within index. + sort.Sort(litestream.WALSegmentInfoSlice(itr.infos)) + if len(itr.infos) > 0 { return true } diff --git a/file/replica_client_test.go b/file/replica_client_test.go index bafeefd5..465e8357 100644 --- a/file/replica_client_test.go +++ b/file/replica_client_test.go @@ -133,91 +133,3 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) { } }) } - -/* -func TestReplica_Sync(t *testing.T) { - // Ensure replica can successfully sync after DB has sync'd. - t.Run("InitialSync", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - // Sync database & then sync replica. - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } else if err := r.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Ensure posistions match. - if want, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, err := r.Pos(context.Background()); err != nil { - t.Fatal(err) - } else if got != want { - t.Fatalf("Pos()=%v, want %v", got, want) - } - }) - - // Ensure replica can successfully sync multiple times. - t.Run("MultiSync", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } - - // Write to the database multiple times and sync after each write. - for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ { - if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil { - t.Fatal(err) - } - - // Sync periodically. - if i%100 == 0 || i == n-1 { - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } else if err := r.Sync(context.Background()); err != nil { - t.Fatal(err) - } - } - } - - // Ensure posistions match. - pos, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } - - if want, err := r.Pos(context.Background()); err != nil { - t.Fatal(err) - } else if got := pos; got != want { - t.Fatalf("Pos()=%v, want %v", got, want) - } - }) - - // Ensure replica returns an error if there is no generation available from the DB. 
- t.Run("ErrNoGeneration", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` { - t.Fatal(err) - } - }) -} -*/ diff --git a/internal/internal.go b/internal/internal.go index be5027f9..26d55aae 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -39,6 +39,39 @@ func (r *ReadCloser) Close() error { return r.c.Close() } +// MultiReadCloser is a logical concatenation of io.ReadCloser. +// It works like io.MultiReader except all objects are closed when Close() is called. +type MultiReadCloser struct { + mr io.Reader + closers []io.Closer +} + +// NewMultiReadCloser returns a new instance of MultiReadCloser. +func NewMultiReadCloser(a []io.ReadCloser) *MultiReadCloser { + readers := make([]io.Reader, len(a)) + closers := make([]io.Closer, len(a)) + for i, rc := range a { + readers[i] = rc + closers[i] = rc + } + return &MultiReadCloser{mr: io.MultiReader(readers...), closers: closers} +} + +// Read reads from the next available reader. +func (mrc *MultiReadCloser) Read(p []byte) (n int, err error) { + return mrc.mr.Read(p) +} + +// Close closes all underlying ReadClosers and returns first error encountered. +func (mrc *MultiReadCloser) Close() (err error) { + for _, c := range mrc.closers { + if e := c.Close(); e != nil && err == nil { + err = e + } + } + return err +} + // ReadCounter wraps an io.Reader and counts the total number of bytes read. type ReadCounter struct { r io.Reader diff --git a/litestream.go b/litestream.go index bd0477c5..a6db5429 100644 --- a/litestream.go +++ b/litestream.go @@ -40,6 +40,14 @@ var ( ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") ) +var ( + // LogWriter is the destination writer for all logging. + LogWriter = os.Stderr + + // LogFlags are the flags passed to log.New(). + LogFlags = 0 +) + // SnapshotIterator represents an iterator over a collection of snapshot metadata. type SnapshotIterator interface { io.Closer @@ -291,6 +299,26 @@ func (p Pos) Truncate() Pos { return Pos{Generation: p.Generation, Index: p.Index} } +// ComparePos returns -1 if a is less than b, 1 if a is greater than b, and +// returns 0 if a and b are equal. Only index & offset are compared. +// Returns an error if generations are not equal. +func ComparePos(a, b Pos) (int, error) { + if a.Generation != b.Generation { + return 0, fmt.Errorf("generation mismatch") + } + + if a.Index < b.Index { + return -1, nil + } else if a.Index > b.Index { + return 1, nil + } else if a.Offset < b.Offset { + return -1, nil + } else if a.Offset > b.Offset { + return 1, nil + } + return 0, nil +} + // Checksum computes a running SQLite checksum over a byte slice. func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) { assert(len(b)%8 == 0, "misaligned checksum byte slice") diff --git a/replica.go b/replica.go index 0cfc21dc..0cb9a6a4 100644 --- a/replica.go +++ b/replica.go @@ -2,7 +2,6 @@ package litestream import ( "context" - "encoding/binary" "fmt" "hash/crc64" "io" @@ -67,6 +66,8 @@ type Replica struct { // If true, replica monitors database for changes automatically. // Set to false if replica is being used synchronously (such as in tests). 
MonitorEnabled bool + + Logger *log.Logger } func NewReplica(db *DB, name string) *Replica { @@ -81,6 +82,12 @@ func NewReplica(db *DB, name string) *Replica { MonitorEnabled: true, } + prefix := fmt.Sprintf("%s: ", r.Name()) + if db != nil { + prefix = fmt.Sprintf("%s(%s): ", db.Path(), r.Name()) + } + r.Logger = log.New(LogWriter, prefix, LogFlags) + return r } @@ -149,16 +156,12 @@ func (r *Replica) Sync(ctx context.Context) (err error) { }() // Find current position of database. - dpos, err := r.db.Pos() - if err != nil { - return fmt.Errorf("cannot determine current generation: %w", err) - } else if dpos.IsZero() { + dpos := r.db.Pos() + if dpos.IsZero() { return fmt.Errorf("no generation, waiting for data") } generation := dpos.Generation - Tracef("%s(%s): replica sync: db.pos=%s", r.db.Path(), r.Name(), dpos) - // Create snapshot if no snapshots exist for generation. snapshotN, err := r.snapshotN(generation) if err != nil { @@ -180,117 +183,140 @@ func (r *Replica) Sync(ctx context.Context) (err error) { return fmt.Errorf("cannot determine replica position: %s", err) } - Tracef("%s(%s): replica sync: calc new pos: %s", r.db.Path(), r.Name(), pos) r.mu.Lock() r.pos = pos r.mu.Unlock() } // Read all WAL files since the last position. - for { - if err = r.syncWAL(ctx); err == io.EOF { - break - } else if err != nil { - return err - } + if err = r.syncWAL(ctx); err != nil { + return err } return nil } func (r *Replica) syncWAL(ctx context.Context) (err error) { - rd, err := r.db.ShadowWALReader(r.Pos()) - if err == io.EOF { + pos := r.Pos() + + itr, err := r.db.WALSegments(ctx, pos.Generation) + if err != nil { return err - } else if err != nil { - return fmt.Errorf("replica wal reader: %w", err) } - defer rd.Close() + defer itr.Close() + + // Group segments by index. + var segments [][]WALSegmentInfo + for itr.Next() { + info := itr.WALSegment() + if cmp, err := ComparePos(pos, info.Pos()); err != nil { + return fmt.Errorf("compare pos: %w", err) + } else if cmp == 1 { + continue // already processed, skip + } + + // Start a new chunk if index has changed. + if len(segments) == 0 || segments[len(segments)-1][0].Index != info.Index { + segments = append(segments, []WALSegmentInfo{info}) + continue + } + + // Add segment to the end of the current index, if matching. + segments[len(segments)-1] = append(segments[len(segments)-1], info) + } + + // Write out segments to replica by index so they can be combined. + for i := range segments { + if err := r.writeIndexSegments(ctx, segments[i]); err != nil { + return fmt.Errorf("write index segments: index=%d err=%w", segments[i][0].Index, err) + } + } + + return nil +} + +func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentInfo) (err error) { + assert(len(segments) > 0, "segments required for replication") + + // First segment position must be equal to last replica position or + // the start of the next index. + if pos := r.Pos(); pos != segments[0].Pos() { + nextIndexPos := pos.Truncate() + nextIndexPos.Index++ + if nextIndexPos != segments[0].Pos() { + return fmt.Errorf("replica skipped position: replica=%s initial=%s", pos, segments[0].Pos()) + } + } + + pos := segments[0].Pos() + initialPos := pos // Copy shadow WAL to client write via io.Pipe(). pr, pw := io.Pipe() defer func() { _ = pw.CloseWithError(err) }() - // Obtain initial position from shadow reader. - // It may have moved to the next index if previous position was at the end. - pos := rd.Pos() - // Copy through pipe into client from the starting position. 
var g errgroup.Group g.Go(func() error { - _, err := r.Client.WriteWALSegment(ctx, pos, pr) + _, err := r.Client.WriteWALSegment(ctx, initialPos, pr) return err }) // Wrap writer to LZ4 compress. zw := lz4.NewWriter(pw) - // Track total WAL bytes written to replica client. - walBytesCounter := replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()) - - // Copy header if at offset zero. - var psalt uint64 // previous salt value - if pos := rd.Pos(); pos.Offset == 0 { - buf := make([]byte, WALHeaderSize) - if _, err := io.ReadFull(rd, buf); err != nil { - return err - } - - psalt = binary.BigEndian.Uint64(buf[16:24]) - - n, err := zw.Write(buf) - if err != nil { - return err - } - walBytesCounter.Add(float64(n)) - } + // Write each segment out to the replica. + for _, info := range segments { + if err := func() error { + // Ensure segments are in order and no bytes are skipped. + if pos != info.Pos() { + return fmt.Errorf("non-contiguous segment: expected=%s current=%s", pos, info.Pos()) + } - // Copy frames. - for { - pos := rd.Pos() - assert(pos.Offset == frameAlign(pos.Offset, r.db.pageSize), "shadow wal reader not frame aligned") + rc, err := r.db.WALSegmentReader(ctx, info.Pos()) + if err != nil { + return err + } + defer rc.Close() - buf := make([]byte, WALFrameHeaderSize+r.db.pageSize) - if _, err := io.ReadFull(rd, buf); err == io.EOF { - break - } else if err != nil { - return err - } + n, err := io.Copy(zw, lz4.NewReader(rc)) + if err != nil { + return err + } else if err := rc.Close(); err != nil { + return err + } - // Verify salt matches the previous frame/header read. - salt := binary.BigEndian.Uint64(buf[8:16]) - if psalt != 0 && psalt != salt { - return fmt.Errorf("replica salt mismatch: %s", pos.String()) - } - psalt = salt + // Track last position written. + pos = info.Pos() + pos.Offset += n - n, err := zw.Write(buf) - if err != nil { - return err + return nil + }(); err != nil { + return fmt.Errorf("wal segment: pos=%s err=%w", info.Pos(), err) } - walBytesCounter.Add(float64(n)) } - // Flush LZ4 writer and close pipe. + // Flush LZ4 writer, close pipe, and wait for write to finish. if err := zw.Close(); err != nil { return err } else if err := pw.Close(); err != nil { return err - } - - // Wait for client to finish write. - if err := g.Wait(); err != nil { - return fmt.Errorf("client write: %w", err) + } else if err := g.Wait(); err != nil { + return err } // Save last replicated position. r.mu.Lock() - r.pos = rd.Pos() + r.pos = pos r.mu.Unlock() - // Track current position - replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Index)) - replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Offset)) + replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()).Add(float64(pos.Offset - initialPos.Offset)) + + // Track total WAL bytes written to replica client. + replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Index)) + replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Offset)) + + r.Logger.Printf("wal segment written: %s sz=%d", initialPos, pos.Offset-initialPos.Offset) return nil } @@ -448,10 +474,8 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { defer func() { _ = tx.Rollback() }() // Obtain current position. 
- pos, err := r.db.Pos() - if err != nil { - return info, fmt.Errorf("cannot determine db position: %w", err) - } else if pos.IsZero() { + pos := r.db.Pos() + if pos.IsZero() { return info, ErrNoGeneration } @@ -491,7 +515,7 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { return info, err } - log.Printf("%s(%s): snapshot written %s/%08x", r.db.Path(), r.Name(), pos.Generation, pos.Index) + r.Logger.Printf("snapshot written %s/%08x", pos.Generation, pos.Index) return info, nil } @@ -559,7 +583,7 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err) } - log.Printf("%s(%s): snapshot deleted %s/%08x", r.db.Path(), r.Name(), generation, index) + r.Logger.Printf("snapshot deleted %s/%08x", generation, index) } return itr.Close() @@ -591,7 +615,10 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s if err := r.Client.DeleteWALSegments(ctx, a); err != nil { return fmt.Errorf("delete wal segments: %w", err) } - log.Printf("%s(%s): wal segmented deleted before %s/%08x: n=%d", r.db.Path(), r.Name(), generation, index, len(a)) + + for _, pos := range a { + r.Logger.Printf("wal segmented deleted: %s", pos) + } return nil } @@ -628,7 +655,7 @@ func (r *Replica) monitor(ctx context.Context) { // Synchronize the shadow wal into the replication directory. if err := r.Sync(ctx); err != nil { - log.Printf("%s(%s): monitor error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("monitor error: %s", err) continue } } @@ -656,7 +683,7 @@ func (r *Replica) retainer(ctx context.Context) { return case <-ticker.C: if err := r.EnforceRetention(ctx); err != nil { - log.Printf("%s(%s): retainer error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("retainer error: %s", err) continue } } @@ -678,7 +705,7 @@ func (r *Replica) snapshotter(ctx context.Context) { return case <-ticker.C: if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration { - log.Printf("%s(%s): snapshotter error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("snapshotter error: %s", err) continue } } @@ -706,7 +733,7 @@ func (r *Replica) validator(ctx context.Context) { return case <-ticker.C: if err := r.Validate(ctx); err != nil { - log.Printf("%s(%s): validation error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("validation error: %s", err) continue } } @@ -768,7 +795,7 @@ func (r *Replica) Validate(ctx context.Context) error { if mismatch { status = "mismatch" } - log.Printf("%s(%s): validator: status=%s db=%016x replica=%016x pos=%s", db.Path(), r.Name(), status, chksum0, chksum1, pos) + r.Logger.Printf("validator: status=%s db=%016x replica=%016x pos=%s", status, chksum0, chksum1, pos) // Validate checksums match. if mismatch { @@ -786,8 +813,6 @@ func (r *Replica) Validate(ctx context.Context) error { // waitForReplica blocks until replica reaches at least the given position. func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error { - db := r.DB() - ticker := time.NewTicker(500 * time.Millisecond) defer ticker.Stop() @@ -810,7 +835,7 @@ func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error { // Obtain current position of replica, check if past target position. 
curr := r.Pos() if curr.IsZero() { - log.Printf("%s(%s): validator: no replica position available", db.Path(), r.Name()) + r.Logger.Printf("validator: no replica position available") continue } diff --git a/replica_test.go b/replica_test.go index 7f42c08a..1a64cc0d 100644 --- a/replica_test.go +++ b/replica_test.go @@ -43,10 +43,7 @@ func TestReplica_Sync(t *testing.T) { } // Fetch current database position. - dpos, err := db.Pos() - if err != nil { - t.Fatal(err) - } + dpos := db.Pos() c := file.NewReplicaClient(t.TempDir()) r := litestream.NewReplica(db, "") @@ -69,11 +66,11 @@ func TestReplica_Sync(t *testing.T) { // Verify WAL matches replica WAL. if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil { t.Fatal(err) - } else if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil { + } else if r0, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil { t.Fatal(err) - } else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil { + } else if b1, err := io.ReadAll(lz4.NewReader(r0)); err != nil { t.Fatal(err) - } else if err := r.Close(); err != nil { + } else if err := r0.Close(); err != nil { t.Fatal(err) } else if !bytes.Equal(b0, b1) { t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1)) @@ -98,10 +95,8 @@ func TestReplica_Snapshot(t *testing.T) { } // Fetch current database position & snapshot. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if info, err := r.Snapshot(context.Background()); err != nil { + pos0 := db.Pos() + if info, err := r.Snapshot(context.Background()); err != nil { t.Fatal(err) } else if got, want := info.Pos(), pos0.Truncate(); got != want { t.Fatalf("pos=%s, want %s", got, want) @@ -122,10 +117,8 @@ func TestReplica_Snapshot(t *testing.T) { } // Fetch current database position & snapshot. - pos1, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if info, err := r.Snapshot(context.Background()); err != nil { + pos1 := db.Pos() + if info, err := r.Snapshot(context.Background()); err != nil { t.Fatal(err) } else if got, want := info.Pos(), pos1.Truncate(); got != want { t.Fatalf("pos=%v, want %v", got, want) From 6db06067b5e507db55c6bb93488983078f2e71aa Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 21 Sep 2021 15:31:11 -0600 Subject: [PATCH 03/95] README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1ce84798..bd44fb64 100644 --- a/README.md +++ b/README.md @@ -33,12 +33,12 @@ energy into the project to help make it better: - Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release. - Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation. -- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. Also, thanks to fly.io for providing testing resources. +- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. - Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it. - Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working. 
- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleuu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. - +Huge thanks to fly.io for their support and for contributing credits for testing and development! ## Open-source, not open-contribution From aa2c684c81e50609a49a1645e94ae7df6964e587 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 2 Oct 2021 09:21:24 -0600 Subject: [PATCH 04/95] Update contribution policy --- README.md | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index bd44fb64..c25e978f 100644 --- a/README.md +++ b/README.md @@ -41,27 +41,20 @@ energy into the project to help make it better: Huge thanks to fly.io for their support and for contributing credits for testing and development! -## Open-source, not open-contribution +## Contribution Policy -[Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open -source but closed to code contributions. This keeps the code base free of -proprietary or licensed code but it also helps me continue to maintain and build -Litestream. +Initially, Litestream was closed to outside contributions. The goal was to +reduce burnout by limiting the maintenance overhead of reviewing and validating +third-party code. However, this policy is overly broad and has prevented small, +easily testable patches from being contributed. -As the author of [BoltDB](https://github.com/boltdb/bolt), I found that -accepting and maintaining third party patches contributed to my burn out and -I eventually archived the project. Writing databases & low-level replication -tools involves nuance and simple one line changes can have profound and -unexpected changes in correctness and performance. Small contributions -typically required hours of my time to properly test and validate them. +Litestream is now open to code contributions for bug fixes only. Features +carry a long-term maintenance burden so they will not be accepted at this +time. Please [submit an issue][new-issue] if you have a feature you'd like to request. -I am grateful for community involvement, bug reports, & feature requests. I do -not wish to come off as anything but welcoming, however, I've -made the decision to keep this project closed to contributions for my own -mental health and long term viability of the project. +If you find mistakes in the documentation, please submit a fix to the +[documentation repository][docs]. -The [documentation repository][docs] is MIT licensed and pull requests are welcome there. - -[releases]: https://github.com/benbjohnson/litestream/releases +[new-issue]: https://github.com/benbjohnson/litestream/issues/new [docs]: https://github.com/benbjohnson/litestream.io From cb33d8c6a91feb42e25ac1db69fb2946b17fe62b Mon Sep 17 00:00:00 2001 From: Colin Arnott Date: Sun, 3 Oct 2021 20:08:16 +0000 Subject: [PATCH 05/95] Replica.Restore fallback to DB.path Per the godoc on Replica.Restore and RestoreOptions.OutputPath, Replica.db.path should be used when RestoreOptions.OutputPath is empty. 
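For illustration only (not part of the patch), a caller can now leave OutputPath unset and the restore targets the replica's own database path. A minimal sketch, assuming the exported Replica.Restore and NewRestoreOptions APIs seen elsewhere in this series and the usual module import path; restoreInPlace is a hypothetical helper name:

    package example // hypothetical example package

    import (
            "context"

            "github.com/benbjohnson/litestream"
    )

    // restoreInPlace restores over the replica's own database file by relying
    // on the new fallback: OutputPath is intentionally left empty.
    func restoreInPlace(ctx context.Context, r *litestream.Replica) error {
            opt := litestream.NewRestoreOptions()
            // opt.OutputPath is not set; with this patch it defaults to the
            // replica's DB path instead of returning "output path required".
            return r.Restore(ctx, opt)
    }

Before this change the same call failed with "output path required" even when the replica already knew its database path.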
Fixes #233 --- replica.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/replica.go b/replica.go index 0cb9a6a4..138851f8 100644 --- a/replica.go +++ b/replica.go @@ -980,7 +980,10 @@ func (r *Replica) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (ge func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) { // Validate options. if opt.OutputPath == "" { - return fmt.Errorf("output path required") + if r.db.path == "" { + return fmt.Errorf("output path required") + } + opt.OutputPath = r.db.path } else if opt.Generation == "" && opt.Index != math.MaxInt32 { return fmt.Errorf("must specify generation when restoring to index") } else if opt.Index != math.MaxInt32 && !opt.Timestamp.IsZero() { From 755f54f4d9b51885076f7aa31334389056361ea7 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 10 Oct 2021 08:35:29 -0600 Subject: [PATCH 06/95] Update CONTRIBUTING & remove pull request template --- .github/CONTRIBUTING.md | 29 +++++++++++++++-------------- .github/pull_request_template.md | 7 ------- README.md | 7 ++++--- 3 files changed, 19 insertions(+), 24 deletions(-) delete mode 100644 .github/pull_request_template.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 917b04a7..b1cf4525 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1,17 +1,18 @@ -## Open-source, not open-contribution +## Contribution Policy -[Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open -source but closed to contributions. This keeps the code base free of proprietary -or licensed code but it also helps me continue to maintain and build Litestream. +Initially, Litestream was closed to outside contributions. The goal was to +reduce burnout by limiting the maintenance overhead of reviewing and validating +third-party code. However, this policy is overly broad and has prevented small, +easily testable patches from being contributed. -As the author of [BoltDB](https://github.com/boltdb/bolt), I found that -accepting and maintaining third party patches contributed to my burn out and -I eventually archived the project. Writing databases & low-level replication -tools involves nuance and simple one line changes can have profound and -unexpected changes in correctness and performance. Small contributions -typically required hours of my time to properly test and validate them. +Litestream is now open to code contributions for bug fixes only. Features carry +a long-term maintenance burden so they will not be accepted at this time. +Please [submit an issue][new-issue] if you have a feature you'd like to +request. + +If you find mistakes in the documentation, please submit a fix to the +[documentation repository][docs]. + +[new-issue]: https://github.com/benbjohnson/litestream/issues/new +[docs]: https://github.com/benbjohnson/litestream.io -I am grateful for community involvement, bug reports, & feature requests. I do -not wish to come off as anything but welcoming, however, I've -made the decision to keep this project closed to contributions for my own -mental health and long term viability of the project. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index fe28e01f..00000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,7 +0,0 @@ -Litestream is not accepting code contributions at this time. 
You can find a summary of why on the project's GitHub README: - -https://github.com/benbjohnson/litestream#open-source-not-open-contribution - -Web site & Documentation changes, however, are welcome. You can find that repository here: - -https://github.com/benbjohnson/litestream.io diff --git a/README.md b/README.md index c25e978f..68ac73c4 100644 --- a/README.md +++ b/README.md @@ -48,9 +48,10 @@ reduce burnout by limiting the maintenance overhead of reviewing and validating third-party code. However, this policy is overly broad and has prevented small, easily testable patches from being contributed. -Litestream is now open to code contributions for bug fixes only. Features -carry a long-term maintenance burden so they will not be accepted at this -time. Please [submit an issue][new-issue] if you have a feature you'd like to request. +Litestream is now open to code contributions for bug fixes only. Features carry +a long-term maintenance burden so they will not be accepted at this time. +Please [submit an issue][new-issue] if you have a feature you'd like to +request. If you find mistakes in the documentation, please submit a fix to the [documentation repository][docs]. From 61c80cbfc2d158adb631c3935e640aa0fe822b18 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 5 Dec 2021 08:44:19 -0700 Subject: [PATCH 07/95] README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 68ac73c4..06856c31 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ energy into the project to help make it better: - Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. - Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it. - Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working. -- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleuu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. +- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. Huge thanks to fly.io for their support and for contributing credits for testing and development! From d09f4ef61800492c2ba258b9ffc21e5826ea32a1 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Wed, 8 Dec 2021 18:28:03 -0700 Subject: [PATCH 08/95] Fix FindMinSnapshotByGeneration() loop ref bug This commit fixes an issue where the reference is taken on the loop variable rather than the slice element when computing the minimum snapshot within a generation so it can cause the wrong snapshot to be chosen. 
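For illustration only (not part of the patch), a minimal sketch of the aliasing problem: in Go versions before 1.22 the range variable is a single variable reused on every iteration, so taking its address yields a pointer to whatever value was examined last. The generation filter is omitted for brevity and the function names are hypothetical; the indexes mirror the test added below.

    package main

    import "fmt"

    type SnapshotInfo struct {
            Generation string
            Index      int
    }

    // buggyMin mirrors the old code: &snapshot points at the shared loop
    // variable, so the returned pointer may not reference the minimum element.
    func buggyMin(a []SnapshotInfo) *SnapshotInfo {
            var min *SnapshotInfo
            for _, snapshot := range a {
                    if min == nil || snapshot.Index < min.Index {
                            min = &snapshot
                    }
            }
            return min
    }

    // fixedMin mirrors the patch: index the slice and take the address of the
    // slice element itself.
    func fixedMin(a []SnapshotInfo) *SnapshotInfo {
            var min *SnapshotInfo
            for i := range a {
                    if min == nil || a[i].Index < min.Index {
                            min = &a[i]
                    }
            }
            return min
    }

    func main() {
            infos := []SnapshotInfo{{Index: 0}, {Index: 24}}
            fmt.Println(buggyMin(infos).Index) // 24 on Go <1.22, not the expected 0
            fmt.Println(fixedMin(infos).Index) // 0
    }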
--- litestream.go | 6 ++++-- litestream_test.go | 10 ++++++++++ replica.go | 4 +++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/litestream.go b/litestream.go index a6db5429..46eb0338 100644 --- a/litestream.go +++ b/litestream.go @@ -213,11 +213,13 @@ func FilterSnapshotsAfter(a []SnapshotInfo, t time.Time) []SnapshotInfo { // FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation. func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo { var min *SnapshotInfo - for _, snapshot := range a { + for i := range a { + snapshot := &a[i] + if snapshot.Generation != generation { continue } else if min == nil || snapshot.Index < min.Index { - min = &snapshot + min = snapshot } } return min diff --git a/litestream_test.go b/litestream_test.go index a03a7489..93327df3 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -40,6 +40,16 @@ func TestChecksum(t *testing.T) { }) } +func TestFindMinSnapshotByGeneration(t *testing.T) { + infos := []litestream.SnapshotInfo{ + {Generation: "29cf4bced74e92ab", Index: 0}, + {Generation: "5dfeb4aa03232553", Index: 24}, + } + if got, want := litestream.FindMinSnapshotByGeneration(infos, "29cf4bced74e92ab"), &infos[0]; got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } +} + func MustDecodeHexString(s string) []byte { b, err := hex.DecodeString(s) if err != nil { diff --git a/replica.go b/replica.go index 138851f8..c6d0f3f4 100644 --- a/replica.go +++ b/replica.go @@ -266,7 +266,9 @@ func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentI zw := lz4.NewWriter(pw) // Write each segment out to the replica. - for _, info := range segments { + for i := range segments { + info := &segments[i] + if err := func() error { // Ensure segments are in order and no bytes are skipped. if pos != info.Pos() { From ba6e13b5d09a056786f62b99061312c71d414906 Mon Sep 17 00:00:00 2001 From: Michael Lynch Date: Sun, 28 Nov 2021 11:19:50 -0500 Subject: [PATCH 09/95] Sort output of snapshots in descending timestamp order By default, the snapshots command seems to output in alphabetical order of hash, which isn't meaningful, as far as I can tell. This change modifies the order of the command output so that ./litestream snapshots returns snapshots from newest to oldest. --- cmd/litestream/snapshots.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index 72e67a5a..574ec640 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "os" + "sort" "text/tabwriter" "time" @@ -85,6 +86,8 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { log.Printf("cannot determine snapshots: %s", err) continue } + // Sort snapshots by creation time from newest to oldest. 
+ sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) for _, info := range infos { fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n", r.Name(), From 531e19ed6faea423162978e12afaaebbe0bb087e Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 12 Dec 2021 10:25:20 -0700 Subject: [PATCH 10/95] Refactor checksum calculation; improve test coverage --- db.go | 95 +++++++++++++++++++----------------- db_test.go | 63 ++++++++++++++++++++++++ testdata/read-wal-fields/ok | Bin 0 -> 12392 bytes 3 files changed, 114 insertions(+), 44 deletions(-) create mode 100644 testdata/read-wal-fields/ok diff --git a/db.go b/db.go index 2a421ad6..8c224cc7 100644 --- a/db.go +++ b/db.go @@ -300,58 +300,20 @@ func (db *DB) invalidateChecksum(ctx context.Context) error { assert(!db.pos.IsZero(), "position required to invalidate checksum") // Read entire WAL from combined segments. - walReader, err := db.WALReader(ctx, db.pos.Generation, db.pos.Index) + rc, err := db.WALReader(ctx, db.pos.Generation, db.pos.Index) if err != nil { return fmt.Errorf("cannot read last wal: %w", err) } - defer walReader.Close() + defer func() { _ = rc.Close() }() // Ensure we don't read past our position. - r := &io.LimitedReader{R: walReader, N: db.pos.Offset} + r := &io.LimitedReader{R: rc, N: db.pos.Offset} - // Read header. - hdr := make([]byte, WALHeaderSize) - if _, err := io.ReadFull(r, hdr); err != nil { - return fmt.Errorf("read shadow wal header: %w", err) - } - - // Read byte order. - byteOrder, err := headerByteOrder(hdr) + // Determine cache values from the current WAL file. + db.salt0, db.salt1, db.chksum0, db.chksum1, db.byteOrder, db.frame, err = ReadWALFields(r, db.pageSize) if err != nil { - return err + return fmt.Errorf("calc checksum: %w", err) } - - // Save salt & checksum to cache, although checksum may be overridden later. - db.salt0 = binary.BigEndian.Uint32(hdr[16:]) - db.salt1 = binary.BigEndian.Uint32(hdr[20:]) - db.chksum0 = binary.BigEndian.Uint32(hdr[24:]) - db.chksum1 = binary.BigEndian.Uint32(hdr[28:]) - db.byteOrder = byteOrder - - // Iterate over each page in the WAL and save the checksum. - frame := make([]byte, db.pageSize+WALFrameHeaderSize) - var hasFrame bool - for { - // Read next page from WAL file. - if _, err := io.ReadFull(r, frame); err == io.EOF { - break // end of WAL file - } else if err != nil { - return fmt.Errorf("read wal: %w", err) - } - - // Save frame checksum to cache. - hasFrame = true - db.chksum0 = binary.BigEndian.Uint32(frame[16:]) - db.chksum1 = binary.BigEndian.Uint32(frame[20:]) - } - - // Save last frame to cache. - if hasFrame { - db.frame = frame - } else { - db.frame = nil - } - return nil } @@ -1739,6 +1701,51 @@ func NewRestoreOptions() RestoreOptions { } } +// ReadWALFields iterates over the header & frames in the WAL data in r. +// Returns salt, checksum, byte order & the last frame. WAL data must start +// from the beginning of the WAL header and must end on either the WAL header +// or at the end of a WAL frame. +func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 uint32, byteOrder binary.ByteOrder, frame []byte, err error) { + // Read header. + hdr := make([]byte, WALHeaderSize) + if _, err := io.ReadFull(r, hdr); err != nil { + return 0, 0, 0, 0, nil, nil, fmt.Errorf("short wal header: %w", err) + } + + // Save salt, initial checksum, & byte order. 
+ salt0 = binary.BigEndian.Uint32(hdr[16:]) + salt1 = binary.BigEndian.Uint32(hdr[20:]) + chksum0 = binary.BigEndian.Uint32(hdr[24:]) + chksum1 = binary.BigEndian.Uint32(hdr[28:]) + if byteOrder, err = headerByteOrder(hdr); err != nil { + return 0, 0, 0, 0, nil, nil, err + } + + // Iterate over each page in the WAL and save the checksum. + frame = make([]byte, pageSize+WALFrameHeaderSize) + var hasFrame bool + for { + // Read next page from WAL file. + if n, err := io.ReadFull(r, frame); err == io.EOF { + break // end of WAL file + } else if err != nil { + return 0, 0, 0, 0, nil, nil, fmt.Errorf("short wal frame (n=%d): %w", n, err) + } + + // Update checksum on each successful frame. + hasFrame = true + chksum0 = binary.BigEndian.Uint32(frame[16:]) + chksum1 = binary.BigEndian.Uint32(frame[20:]) + } + + // Clear frame if none were successfully read. + if !hasFrame { + frame = nil + } + + return salt0, salt1, chksum0, chksum1, byteOrder, frame, nil +} + // Database metrics. var ( dbSizeGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{ diff --git a/db_test.go b/db_test.go index 220f7e67..5c3f51c1 100644 --- a/db_test.go +++ b/db_test.go @@ -1,8 +1,10 @@ package litestream_test import ( + "bytes" "context" "database/sql" + "encoding/binary" "os" "path/filepath" "strings" @@ -473,6 +475,67 @@ func TestDB_Sync(t *testing.T) { }) } +func TestReadWALFields(t *testing.T) { + b, err := os.ReadFile("testdata/read-wal-fields/ok") + if err != nil { + t.Fatal(err) + } + + t.Run("OK", func(t *testing.T) { + if salt0, salt1, chksum0, chksum1, byteOrder, frame, err := litestream.ReadWALFields(bytes.NewReader(b), 4096); err != nil { + t.Fatal(err) + } else if got, want := salt0, uint32(0x4F7598FD); got != want { + t.Fatalf("salt0=%x, want %x", got, want) + } else if got, want := salt1, uint32(0x875FFD5B); got != want { + t.Fatalf("salt1=%x, want %x", got, want) + } else if got, want := chksum0, uint32(0x2081CAF7); got != want { + t.Fatalf("chksum0=%x, want %x", got, want) + } else if got, want := chksum1, uint32(0x31093CD3); got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if got, want := byteOrder, binary.LittleEndian; got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if !bytes.Equal(frame, b[8272:]) { + t.Fatal("last frame mismatch") + } + }) + + t.Run("HeaderOnly", func(t *testing.T) { + if salt0, salt1, chksum0, chksum1, byteOrder, frame, err := litestream.ReadWALFields(bytes.NewReader(b[:32]), 4096); err != nil { + t.Fatal(err) + } else if got, want := salt0, uint32(0x4F7598FD); got != want { + t.Fatalf("salt0=%x, want %x", got, want) + } else if got, want := salt1, uint32(0x875FFD5B); got != want { + t.Fatalf("salt1=%x, want %x", got, want) + } else if got, want := chksum0, uint32(0xD27F7862); got != want { + t.Fatalf("chksum0=%x, want %x", got, want) + } else if got, want := chksum1, uint32(0xE664AF8E); got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if got, want := byteOrder, binary.LittleEndian; got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if frame != nil { + t.Fatal("expected no frame") + } + }) + + t.Run("ErrShortHeader", func(t *testing.T) { + if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader([]byte{}), 4096); err == nil || err.Error() != `short wal header: EOF` { + t.Fatal(err) + } + }) + + t.Run("ErrBadMagic", func(t *testing.T) { + if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(make([]byte, 32)), 4096); err == nil || err.Error() != `invalid wal header magic: 0` { + 
t.Fatal(err) + } + }) + + t.Run("ErrShortFrame", func(t *testing.T) { + if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(b[:100]), 4096); err == nil || err.Error() != `short wal frame (n=68): unexpected EOF` { + t.Fatal(err) + } + }) +} + // MustOpenDBs returns a new instance of a DB & associated SQL DB. func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) { tb.Helper() diff --git a/testdata/read-wal-fields/ok b/testdata/read-wal-fields/ok new file mode 100644 index 0000000000000000000000000000000000000000..e019bfe23076f72cea2fe23aa339a85beaf925b2 GIT binary patch literal 12392 zcmeI%F-yZh6u|Mjqf|PSxalIi(IPDr1s4aaWDp8s?I1Xm7ApvfASZMyb#Zfcau)48gcH8L(l^?AuwW@p^-#wdlOx zZZ*Cy-k;o*UYzfaSGGPogO}5j@|A7MxOdI$^l3hPekvYKFY$T{ z_x}xzD(pzsJ<-8Xxr0Ce0R#|0009ILKmY**5I_Kd#05mB*MdM7ZM|{bwALI}!csMq zYU!XHij~b)&V4f8=g<2EM;mXiXCH&e@7ZUqgv6_2K?D#$009ILKmY**5I_I{1Q1vn wfj@l#>GXbznZ2*+zx;^`CQDN%g&=?c0tg_000IagfB*srAb>!E0!Ew6FEVR6MgRZ+ literal 0 HcmV?d00001 From 3f0ec9fa9fff9af47bcb5d4be2b3047d01e90f97 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 4 Jan 2022 14:47:11 -0700 Subject: [PATCH 11/95] Refactor Restore() This commit refactors out the complexity of downloading ordered WAL files in parallel to a type called `WALDownloader`. This makes it easier to test the restore separately from the download. --- .github/workflows/test.yml | 18 +- Makefile | 7 +- cmd/litestream/databases.go | 13 +- cmd/litestream/generations.go | 17 +- cmd/litestream/main.go | 17 +- cmd/litestream/main_test.go | 3 +- cmd/litestream/replicate.go | 16 +- cmd/litestream/restore.go | 204 +++-- cmd/litestream/snapshots.go | 15 +- cmd/litestream/wal.go | 15 +- db.go | 118 +-- ...eplica_client.go => file_replica_client.go | 141 ++- ...ent_test.go => file_replica_client_test.go | 42 +- integration/replica_client_test.go | 566 ++++++++++++ internal/internal.go | 18 +- litestream.go | 15 + litestream_test.go | 56 +- mock/read_closer.go | 14 + mock/snapshot_iterator.go | 28 + mock/wal_segment_iterator.go | 28 + replica.go | 456 +--------- replica_client.go | 389 ++++++++ replica_client_test.go | 838 +++++++++--------- replica_test.go | 7 +- testdata/Makefile | 8 + .../no-generations/.gitignore | 0 testdata/find-latest-generation/ok/Makefile | 7 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000bb8.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/000003e8.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000bb8.snapshot.lz4 | Bin 0 -> 93 bytes .../0000000000000000/snapshots/.gitignore | 0 testdata/generation-time-bounds/ok/Makefile | 8 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../snapshots-only/Makefile | 5 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../generations/0000000000000000/.gitignore | 0 .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes 
.../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00001234.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000002/00000000.wal.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00001234.wal.lz4 | Bin 0 -> 93 bytes .../generations/0000000000000000/.gitignore | 0 testdata/max-snapshot-index/ok/Makefile | 6 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/000003e8.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/000007d0.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00001234.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../replica-client-time-bounds/ok/Makefile | 6 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes testdata/restore/bad-permissions/00000000.db | Bin 0 -> 4096 bytes testdata/restore/bad-permissions/README | 36 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes testdata/restore/ok/00000002.db | Bin 0 -> 8192 bytes testdata/restore/ok/README | 36 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 128 bytes .../wal/00000002/00000000.wal.lz4 | Bin 0 -> 125 bytes .../wal/00000002/00001038.wal.lz4 | Bin 0 -> 108 bytes testdata/restore/snapshot-only/00000000.db | Bin 0 -> 4096 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../0000000000000000/snapshots/.gitignore | 0 testdata/snapshot-time-bounds/ok/Makefile | 6 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000002.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 128 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 128 bytes .../wal/00000002/00000000.wal.lz4 | Bin 0 -> 125 bytes .../wal/00000002/00001038.wal.lz4 | Bin 0 -> 108 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../wal/00000002/00000000.wal.lz4 | Bin 0 -> 125 bytes .../wal/00000002/00001038.wal.lz4 | Bin 0 -> 108 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes testdata/wal-downloader/ok/00000000.wal | Bin 0 -> 16512 bytes testdata/wal-downloader/ok/00000001.wal | Bin 0 -> 4152 bytes testdata/wal-downloader/ok/00000002.wal | Bin 0 -> 8272 bytes testdata/wal-downloader/ok/README | 40 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 128 bytes 
.../wal/00000002/00000000.wal.lz4 | Bin 0 -> 125 bytes .../wal/00000002/00001038.wal.lz4 | Bin 0 -> 108 bytes testdata/wal-downloader/one/00000000.wal | Bin 0 -> 16512 bytes testdata/wal-downloader/one/README | 17 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../0000000000000000/wal/.gitignore | 0 testdata/wal-time-bounds/ok/Makefile | 6 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes wal_downloader.go | 335 +++++++ wal_downloader_test.go | 534 +++++++++++ 130 files changed, 2890 insertions(+), 1201 deletions(-) rename file/replica_client.go => file_replica_client.go (64%) rename file/replica_client_test.go => file_replica_client_test.go (54%) create mode 100644 integration/replica_client_test.go create mode 100644 mock/read_closer.go create mode 100644 mock/snapshot_iterator.go create mode 100644 mock/wal_segment_iterator.go create mode 100644 testdata/Makefile create mode 100644 testdata/find-latest-generation/no-generations/.gitignore create mode 100644 testdata/find-latest-generation/ok/Makefile create mode 100644 testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 create mode 100644 testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 create mode 100644 testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 create mode 100644 testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore create mode 100644 testdata/generation-time-bounds/ok/Makefile create mode 100644 testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/generation-time-bounds/snapshots-only/Makefile create mode 100644 testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore create mode 100644 
testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 create mode 100644 testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 create mode 100644 testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore create mode 100644 testdata/max-snapshot-index/ok/Makefile create mode 100644 testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 create mode 100644 testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 create mode 100644 testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 create mode 100644 testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/replica-client-time-bounds/ok/Makefile create mode 100644 testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/restore/bad-permissions/00000000.db create mode 100644 testdata/restore/bad-permissions/README create mode 100644 testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/restore/ok/00000002.db create mode 100644 testdata/restore/ok/README create mode 100644 testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 
create mode 100644 testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 create mode 100644 testdata/restore/snapshot-only/00000000.db create mode 100644 testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore create mode 100644 testdata/snapshot-time-bounds/ok/Makefile create mode 100644 testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 create mode 100644 testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 create mode 100644 testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 create mode 100644 testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/wal-downloader/ok/00000000.wal create mode 100644 testdata/wal-downloader/ok/00000001.wal create mode 100644 testdata/wal-downloader/ok/00000002.wal create mode 100644 testdata/wal-downloader/ok/README create mode 100644 testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 
testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 create mode 100644 testdata/wal-downloader/one/00000000.wal create mode 100644 testdata/wal-downloader/one/README create mode 100644 testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 testdata/wal-time-bounds/no-wal-segments/generations/0000000000000000/wal/.gitignore create mode 100644 testdata/wal-time-bounds/ok/Makefile create mode 100644 testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 wal_downloader.go create mode 100644 wal_downloader_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4d3122b7..aabaa6ea 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,31 +30,31 @@ jobs: LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} - name: Run unit tests - run: go test -v ./... + run: make testdata && go test -v ./... - name: Run aws s3 tests - run: go test -v -run=TestReplicaClient . -integration s3 + run: go test -v -run=TestReplicaClient ./integration -replica-type s3 env: LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: ${{ secrets.LITESTREAM_S3_REGION }} - LITESTREAM_S3_BUCKET: ${{ secrets.LITESTREAM_S3_BUCKET }} + LITESTREAM_S3_REGION: us-east-1 + LITESTREAM_S3_BUCKET: integration.litestream.io - name: Run google cloud storage (gcs) tests - run: go test -v -run=TestReplicaClient . -integration gcs + run: go test -v -run=TestReplicaClient ./integration -replica-type gcs env: GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: ${{ secrets.LITESTREAM_GCS_BUCKET }} + LITESTREAM_GCS_BUCKET: integration.litestream.io - name: Run azure blob storage (abs) tests - run: go test -v -run=TestReplicaClient . -integration abs + run: go test -v -run=TestReplicaClient ./integration -replica-type abs env: LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: ${{ secrets.LITESTREAM_ABS_BUCKET }} + LITESTREAM_ABS_BUCKET: integration - name: Run sftp tests - run: go test -v -run=TestReplicaClient . -integration sftp + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp env: LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} diff --git a/Makefile b/Makefile index e3d75e4c..70d3709a 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,9 @@ -default: +.PHONY: default +default: testdata + +.PHONY: testdata +testdata: + make -C testdata docker: docker build -t litestream . 
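Since the new `./integration` package is only created by this patch and its contents are not shown in this section, the following is a rough, illustrative sketch of how a `-replica-type` flag like the one used in the workflow above might gate the shared `TestReplicaClient` suite. The flag name matches the workflow invocation; everything else (package layout, skip message, helper behavior) is an assumption, not the patch's actual code.

package integration

import (
	"flag"
	"testing"
)

// replicaType selects which remote replica client the suite exercises
// (e.g. "s3", "gcs", "abs", "sftp"). Illustrative only.
var replicaType = flag.String("replica-type", "", "replica client type to test")

func TestReplicaClient(t *testing.T) {
	if *replicaType == "" {
		t.Skip("-replica-type not set; skipping replica client integration tests")
	}
	// Construct the matching client from the LITESTREAM_* environment
	// variables set in the workflow, then run the shared read/write
	// assertions against it.
}

Credentials and bucket settings are still supplied through LITESTREAM_* environment variables, with the S3 region and the S3/GCS/ABS bucket names now pinned in the workflow rather than read from repository secrets.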
diff --git a/cmd/litestream/databases.go b/cmd/litestream/databases.go index 236c01eb..dd7747c5 100644 --- a/cmd/litestream/databases.go +++ b/cmd/litestream/databases.go @@ -10,12 +10,15 @@ import ( ) // DatabasesCommand is a command for listing managed databases. -type DatabasesCommand struct{} +type DatabasesCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -24,10 +27,10 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { } // Load configuration. - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/cmd/litestream/generations.go b/cmd/litestream/generations.go index fefa40c6..e4f9fafd 100644 --- a/cmd/litestream/generations.go +++ b/cmd/litestream/generations.go @@ -13,12 +13,15 @@ import ( ) // GenerationsCommand represents a command to list all generations for a database. -type GenerationsCommand struct{} +type GenerationsCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -33,19 +36,19 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) var r *litestream.Replica dbUpdatedAt := time.Now() if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } @@ -93,7 +96,7 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) // Iterate over each generation for the replica. 
for _, generation := range generations { - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) + createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client, generation) if err != nil { log.Printf("%s: cannot determine generation time bounds: %s", r.Name(), err) continue diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 783f73e6..7f6f101a 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -20,7 +20,6 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -126,7 +125,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { return err case "restore": - return (&RestoreCommand{}).Run(ctx, args) + return NewRestoreCommand().Run(ctx, args) case "snapshots": return (&SnapshotsCommand{}).Run(ctx, args) case "version": @@ -383,8 +382,8 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re return r, nil } -// newFileReplicaClientFromConfig returns a new instance of file.ReplicaClient built from config. -func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *file.ReplicaClient, err error) { +// newFileReplicaClientFromConfig returns a new instance of FileReplicaClient built from config. +func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *litestream.FileReplicaClient, err error) { // Ensure URL & path are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for file replica") @@ -409,9 +408,7 @@ func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ } // Instantiate replica and apply time fields, if set. - client := file.NewReplicaClient(path) - client.Replica = r - return client, nil + return litestream.NewFileReplicaClient(path), nil } // newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config. @@ -669,9 +666,9 @@ func DefaultConfigPath() string { return defaultConfigPath } -func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) { - return fs.String("config", "", "config path"), - fs.Bool("no-expand-env", false, "do not expand env vars in config") +func registerConfigFlag(fs *flag.FlagSet, configPath *string, noExpandEnv *bool) { + fs.StringVar(configPath, "config", "", "config path") + fs.BoolVar(noExpandEnv, "no-expand-env", false, "do not expand env vars in config") } // expand returns an absolute path for s. 
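Before the cmd/litestream/restore.go hunks that follow, a condensed sketch of the package-level restore flow this patch introduces may help orientation. The `litestream.FindLatestGeneration`, `FindMaxIndexByGeneration`, `FindSnapshotForIndex`, `NewRestoreOptions`, and `Restore` signatures are taken from those hunks; the wrapper function `restoreLatest` and its minimal error handling are illustrative only, not code from the patch.

package example

import (
	"context"

	"github.com/benbjohnson/litestream"
)

// restoreLatest shows the order of the new helpers: pick the newest
// generation, find its highest WAL index, locate the snapshot at or before
// that index, then restore up to the target index.
func restoreLatest(ctx context.Context, client litestream.ReplicaClient, outputPath string) error {
	generation, err := litestream.FindLatestGeneration(ctx, client)
	if err != nil {
		return err
	}
	targetIndex, err := litestream.FindMaxIndexByGeneration(ctx, client, generation)
	if err != nil {
		return err
	}
	snapshotIndex, err := litestream.FindSnapshotForIndex(ctx, client, generation, targetIndex)
	if err != nil {
		return err
	}
	return litestream.Restore(ctx, client, outputPath, generation, snapshotIndex, targetIndex, litestream.NewRestoreOptions())
}

The actual RestoreCommand layers the -o, -replica, -generation, -index, -if-db-not-exists, and -if-replica-exists flags on top of this sequence, as the hunks below show.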
diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index 75131e4b..38860950 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -9,7 +9,6 @@ import ( "github.com/benbjohnson/litestream" main "github.com/benbjohnson/litestream/cmd/litestream" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" ) @@ -103,7 +102,7 @@ func TestNewFileReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*file.ReplicaClient); !ok { + } else if client, ok := r.Client.(*litestream.FileReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Path(), "/foo"; got != want { t.Fatalf("Path=%s, want %s", got, want) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index fdaebd2f..3da238fd 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -13,7 +13,6 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -23,6 +22,9 @@ import ( // ReplicateCommand represents a command that continuously replicates SQLite databases. type ReplicateCommand struct { + configPath string + noExpandEnv bool + cmd *exec.Cmd // subcommand execCh chan error // subcommand error channel @@ -42,7 +44,7 @@ func NewReplicateCommand() *ReplicateCommand { func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) execFlag := fs.String("exec", "", "execute subcommand") - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -52,7 +54,7 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e if fs.NArg() == 1 { return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0)) } else if fs.NArg() > 1 { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } @@ -66,10 +68,10 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e } c.Config.DBs = []*DBConfig{dbConfig} } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } - if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil { + if c.Config, err = ReadConfigFile(c.configPath, !c.noExpandEnv); err != nil { return err } } @@ -110,7 +112,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { log.Printf("initialized db: %s", db.Path()) for _, r := range db.Replicas { switch client := r.Client.(type) { - case *file.ReplicaClient: + case *litestream.FileReplicaClient: log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path()) case *s3.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Region, client.Endpoint, r.SyncInterval) diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 28c20fc1..9e3dca1b 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go 
@@ -7,31 +7,46 @@ import ( "fmt" "log" "os" + "path/filepath" "strconv" - "time" "github.com/benbjohnson/litestream" ) // RestoreCommand represents a command to restore a database from a backup. -type RestoreCommand struct{} +type RestoreCommand struct { + snapshotIndex int // index of snapshot to start from + + // CLI options + configPath string // path to config file + noExpandEnv bool // if true, do not expand env variables in config + outputPath string // path to restore database to + replicaName string // optional, name of replica to restore from + generation string // optional, generation to restore + targetIndex int // optional, last WAL index to replay + ifDBNotExists bool // if true, skips restore if output path already exists + ifReplicaExists bool // if true, skips if no backups exist + opt litestream.RestoreOptions +} + +func NewRestoreCommand() *RestoreCommand { + return &RestoreCommand{ + targetIndex: -1, + opt: litestream.NewRestoreOptions(), + } +} // Run executes the command. func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { - opt := litestream.NewRestoreOptions() - opt.Verbose = true - fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - fs.StringVar(&opt.OutputPath, "o", "", "output path") - fs.StringVar(&opt.ReplicaName, "replica", "", "replica name") - fs.StringVar(&opt.Generation, "generation", "", "generation name") - fs.Var((*indexVar)(&opt.Index), "index", "wal index") - fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism") - ifDBNotExists := fs.Bool("if-db-not-exists", false, "") - ifReplicaExists := fs.Bool("if-replica-exists", false, "") - timestampStr := fs.String("timestamp", "", "timestamp") - verbose := fs.Bool("v", false, "verbose output") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.outputPath, "o", "", "output path") + fs.StringVar(&c.replicaName, "replica", "", "replica name") + fs.StringVar(&c.generation, "generation", "", "generation name") + fs.Var((*indexVar)(&c.targetIndex), "index", "wal index") + fs.IntVar(&c.opt.Parallelism, "parallelism", c.opt.Parallelism, "parallelism") + fs.BoolVar(&c.ifDBNotExists, "if-db-not-exists", false, "") + fs.BoolVar(&c.ifReplicaExists, "if-replica-exists", false, "") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -40,83 +55,100 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } + arg := fs.Arg(0) - // Parse timestamp, if specified. - if *timestampStr != "" { - if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil { - return errors.New("invalid -timestamp, must specify in ISO 8601 format (e.g. 2000-01-01T00:00:00Z)") - } + // Ensure a generation is specified if target index is specified. + if c.targetIndex != -1 && c.generation == "" { + return fmt.Errorf("must specify -generation when using -index flag") } - // Instantiate logger if verbose output is enabled. - if *verbose { - opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds) + // Default to original database path if output path not specified. + if !isURL(arg) && c.outputPath == "" { + c.outputPath = arg } - // Determine replica & generation to restore from. 
- var r *litestream.Replica - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists { - fmt.Println("database already exists, skipping") - return nil - } else if err != nil { - return err - } - } else { - if *configPath == "" { - *configPath = DefaultConfigPath() - } - if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists { - fmt.Println("database already exists, skipping") - return nil + // Exit successfully if the output file already exists and flag is set. + if _, err := os.Stat(c.outputPath); !os.IsNotExist(err) && c.ifDBNotExists { + fmt.Println("database already exists, skipping") + return nil + } + + // Create parent directory if it doesn't already exist. + if err := os.MkdirAll(filepath.Dir(c.outputPath), 0700); err != nil { + return fmt.Errorf("cannot create parent directory: %w", err) + } + + // Build replica from either a URL or config. + r, err := c.loadReplica(ctx, arg) + if err != nil { + return err + } + + // Determine latest generation if one is not specified. + if c.generation == "" { + if c.generation, err = litestream.FindLatestGeneration(ctx, r.Client); err == litestream.ErrNoGeneration { + // Return an error if no matching targets found. + // If optional flag set, return success. Useful for automated recovery. + if c.ifReplicaExists { + fmt.Println("no matching backups found") + return nil + } + return fmt.Errorf("no matching backups found") } else if err != nil { - return err + return fmt.Errorf("cannot determine latest generation: %w", err) } } - // Return an error if no matching targets found. - // If optional flag set, return success. Useful for automated recovery. - if opt.Generation == "" { - if *ifReplicaExists { - fmt.Println("no matching backups found") - return nil + // Determine the maximum available index for the generation if one is not specified. + if c.targetIndex == -1 { + if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client, c.generation); err != nil { + return fmt.Errorf("cannot determine latest index in generation %q: %w", c.generation, err) } - return fmt.Errorf("no matching backups found") } - return r.Restore(ctx, opt) + // Find latest snapshot that occurs before the index. + // TODO: Optionally allow -snapshot-index + if c.snapshotIndex, err = litestream.FindSnapshotForIndex(ctx, r.Client, c.generation, c.targetIndex); err != nil { + return fmt.Errorf("cannot find snapshot index: %w", err) + } + + c.opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds) + + return litestream.Restore(ctx, r.Client, c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt) } -// loadFromURL creates a replica & updates the restore options from a replica URL. -func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) { - if opt.OutputPath == "" { - return nil, fmt.Errorf("output path required") +func (c *RestoreCommand) loadReplica(ctx context.Context, arg string) (*litestream.Replica, error) { + if isURL(arg) { + return c.loadReplicaFromURL(ctx, arg) } + return c.loadReplicaFromConfig(ctx, arg) +} - // Exit successfully if the output file already exists.
- if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists { - return nil, errSkipDBExists +// loadReplicaFromURL creates a replica & updates the restore options from a replica URL. +func (c *RestoreCommand) loadReplicaFromURL(ctx context.Context, replicaURL string) (*litestream.Replica, error) { + if c.configPath != "" { + return nil, fmt.Errorf("cannot specify a replica URL and the -config flag") + } else if c.replicaName != "" { + return nil, fmt.Errorf("cannot specify a replica URL and the -replica flag") + } else if c.outputPath == "" { + return nil, fmt.Errorf("output path required") } syncInterval := litestream.DefaultSyncInterval - r, err := NewReplicaFromConfig(&ReplicaConfig{ + return NewReplicaFromConfig(&ReplicaConfig{ URL: replicaURL, SyncInterval: &syncInterval, }, nil) - if err != nil { - return nil, err - } - opt.Generation, _, err = r.CalcRestoreTarget(ctx, *opt) - return r, err } -// loadFromConfig returns a replica & updates the restore options from a DB reference. -func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) { +// loadReplicaFromConfig returns replicas based on the specific config path. +func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, dbPath string) (*litestream.Replica, error) { + if c.configPath == "" { + c.configPath = DefaultConfigPath() + } + // Load configuration. - config, err := ReadConfigFile(configPath, expandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return nil, err } @@ -132,25 +164,34 @@ func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath db, err := NewDBFromConfig(dbConfig) if err != nil { return nil, err + } else if len(db.Replicas) == 0 { + return nil, fmt.Errorf("database has no replicas: %s", dbPath) } - // Restore into original database path if not specified. - if opt.OutputPath == "" { - opt.OutputPath = dbPath + // Filter by replica name if specified. + if c.replicaName != "" { + r := db.Replica(c.replicaName) + if r == nil { + return nil, fmt.Errorf("replica %q not found", c.replicaName) + } + return r, nil } - // Exit successfully if the output file already exists. - if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists { - return nil, errSkipDBExists + // Choose only replica if only one available and no name is specified. + if len(db.Replicas) == 1 { + return db.Replicas[0], nil } - // Determine the appropriate replica & generation to restore from, - r, generation, err := db.CalcRestoreTarget(ctx, *opt) - if err != nil { - return nil, err + // A replica must be specified when restoring a specific generation with multiple replicas. + if c.generation != "" { + return nil, fmt.Errorf("must specify -replica when restoring from a specific generation") } - opt.Generation = generation + // Determine latest replica to restore from. + r, err := litestream.LatestReplica(ctx, db.Replicas) + if err != nil { + return nil, fmt.Errorf("cannot determine latest replica: %w", err) + } return r, nil } @@ -186,10 +227,6 @@ Arguments: Restore up to a specific hex-encoded WAL index (inclusive). Defaults to use the highest available index. - -timestamp TIMESTAMP - Restore to a specific point-in-time. - Defaults to use the latest available backup. - -o PATH Output path of the restored database. Defaults to original DB path. @@ -213,9 +250,6 @@ Examples: # Restore latest replica for database to original location. 
$ litestream restore /path/to/db - # Restore replica for database to a given point in time. - $ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db - # Restore latest replica for database to new /tmp directory $ litestream restore -o /tmp/db /path/to/db diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index 574ec640..d8f84fa7 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -14,12 +14,15 @@ import ( ) // SnapshotsCommand represents a command to list snapshots for a command. -type SnapshotsCommand struct{} +type SnapshotsCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -33,19 +36,19 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { var db *litestream.DB var r *litestream.Replica if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index 9b7b9efc..d3cc6818 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -13,12 +13,15 @@ import ( ) // WALCommand represents a command to list WAL files for a database. -type WALCommand struct{} +type WALCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") generation := fs.String("generation", "", "generation name") fs.Usage = c.Usage @@ -33,19 +36,19 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { var db *litestream.DB var r *litestream.Replica if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. 
- config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/db.go b/db.go index 8c224cc7..f56bdc57 100644 --- a/db.go +++ b/db.go @@ -12,7 +12,6 @@ import ( "io" "io/ioutil" "log" - "math" "math/rand" "os" "path/filepath" @@ -62,8 +61,9 @@ type DB struct { chksum0, chksum1 uint32 byteOrder binary.ByteOrder - fileInfo os.FileInfo // db info cached during init - dirInfo os.FileInfo // parent dir info cached during init + fileMode os.FileMode // db mode cached during init + dirMode os.FileMode // parent dir mode cached during init + uid, gid int // db user & group id cached during init ctx context.Context cancel func() @@ -180,16 +180,6 @@ func (db *DB) ShadowWALDir(generation string) string { return filepath.Join(db.GenerationPath(generation), "wal") } -// FileInfo returns the cached file stats for the database file when it was initialized. -func (db *DB) FileInfo() os.FileInfo { - return db.fileInfo -} - -// DirInfo returns the cached file stats for the parent directory of the database file when it was initialized. -func (db *DB) DirInfo() os.FileInfo { - return db.dirInfo -} - // Replica returns a replica by name. func (db *DB) Replica(name string) *Replica { for _, r := range db.Replicas { @@ -505,13 +495,14 @@ func (db *DB) init() (err error) { } else if err != nil { return err } - db.fileInfo = fi + db.fileMode = fi.Mode() + db.uid, db.gid = internal.Fileinfo(fi) // Obtain permissions for parent directory. if fi, err = os.Stat(filepath.Dir(db.path)); err != nil { return err } - db.dirInfo = fi + db.dirMode = fi.Mode() dsn := db.path dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) @@ -577,7 +568,7 @@ func (db *DB) init() (err error) { } // Ensure meta directory structure exists. - if err := internal.MkdirAll(db.MetaPath(), db.dirInfo); err != nil { + if err := internal.MkdirAll(db.MetaPath(), db.dirMode, db.uid, db.gid); err != nil { return err } @@ -785,7 +776,7 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { // Generate new directory. dir := filepath.Join(db.MetaPath(), "generations", generation) - if err := internal.MkdirAll(dir, db.dirInfo); err != nil { + if err := internal.MkdirAll(dir, db.dirMode, db.uid, db.gid); err != nil { return "", err } @@ -796,15 +787,10 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { // Atomically write generation name as current generation. 
generationNamePath := db.GenerationNamePath() - mode := os.FileMode(0600) - if db.fileInfo != nil { - mode = db.fileInfo.Mode() - } - if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil { + if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), db.fileMode); err != nil { return "", fmt.Errorf("write generation temp file: %w", err) } - uid, gid := internal.Fileinfo(db.fileInfo) - _ = os.Chown(generationNamePath+".tmp", uid, gid) + _ = os.Chown(generationNamePath+".tmp", db.uid, db.gid) if err := os.Rename(generationNamePath+".tmp", generationNamePath); err != nil { return "", fmt.Errorf("rename generation file: %w", err) } @@ -1086,7 +1072,7 @@ func (db *DB) copyToShadowWAL(ctx context.Context) error { tempFilename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.tmp") defer os.Remove(tempFilename) - f, err := internal.CreateFile(tempFilename, db.fileInfo) + f, err := internal.CreateFile(tempFilename, db.fileMode, db.uid, db.gid) if err != nil { return err } @@ -1214,12 +1200,12 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error filename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4") // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), db.dirMode, db.uid, db.gid); err != nil { return err } // Write WAL segment to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", db.fileInfo) + f, err := internal.CreateFile(filename+".tmp", db.fileMode, db.uid, db.gid) if err != nil { return err } @@ -1542,39 +1528,10 @@ func (db *DB) monitor() { } } -// CalcRestoreTarget returns a replica & generation to restore from based on opt criteria. -func (db *DB) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (*Replica, string, error) { - var target struct { - replica *Replica - generation string - updatedAt time.Time - } - - for _, r := range db.Replicas { - // Skip replica if it does not match filter. - if opt.ReplicaName != "" && r.Name() != opt.ReplicaName { - continue - } - - generation, updatedAt, err := r.CalcRestoreTarget(ctx, opt) - if err != nil { - return nil, "", err - } - - // Use the latest replica if we have multiple candidates. - if !updatedAt.After(target.updatedAt) { - continue - } - - target.replica, target.generation, target.updatedAt = r, generation, updatedAt - } - return target.replica, target.generation, nil -} - -// applyWAL performs a truncating checkpoint on the given database. -func applyWAL(ctx context.Context, index int, dbPath string) error { +// ApplyWAL performs a truncating checkpoint on the given database. +func ApplyWAL(ctx context.Context, dbPath, walPath string) error { // Copy WAL file from it's staging path to the correct "-wal" location. 
- if err := os.Rename(fmt.Sprintf("%s-%08x-wal", dbPath, index), dbPath+"-wal"); err != nil { + if err := os.Rename(walPath, dbPath+"-wal"); err != nil { return err } @@ -1583,7 +1540,7 @@ func applyWAL(ctx context.Context, index int, dbPath string) error { if err != nil { return err } - defer d.Close() + defer func() { _ = d.Close() }() var row [3]int if err := d.QueryRow(`PRAGMA wal_checkpoint(TRUNCATE);`).Scan(&row[0], &row[1], &row[2]); err != nil { @@ -1660,47 +1617,6 @@ func formatWALPath(index int) string { var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) -// DefaultRestoreParallelism is the default parallelism when downloading WAL files. -const DefaultRestoreParallelism = 8 - -// RestoreOptions represents options for DB.Restore(). -type RestoreOptions struct { - // Target path to restore into. - // If blank, the original DB path is used. - OutputPath string - - // Specific replica to restore from. - // If blank, all replicas are considered. - ReplicaName string - - // Specific generation to restore from. - // If blank, all generations considered. - Generation string - - // Specific index to restore from. - // Set to math.MaxInt32 to ignore index. - Index int - - // Point-in-time to restore database. - // If zero, database restore to most recent state available. - Timestamp time.Time - - // Specifies how many WAL files are downloaded in parallel during restore. - Parallelism int - - // Logging settings. - Logger *log.Logger - Verbose bool -} - -// NewRestoreOptions returns a new instance of RestoreOptions with defaults. -func NewRestoreOptions() RestoreOptions { - return RestoreOptions{ - Index: math.MaxInt32, - Parallelism: DefaultRestoreParallelism, - } -} - // ReadWALFields iterates over the header & frames in the WAL data in r. // Returns salt, checksum, byte order & the last frame. WAL data must start // from the beginning of the WAL header and must end on either the WAL header diff --git a/file/replica_client.go b/file_replica_client.go similarity index 64% rename from file/replica_client.go rename to file_replica_client.go index ef7d7b91..a8873f0f 100644 --- a/file/replica_client.go +++ b/file_replica_client.go @@ -1,4 +1,4 @@ -package file +package litestream import ( "context" @@ -10,49 +10,46 @@ import ( "sort" "strings" - "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/internal" ) -// ReplicaClientType is the client type for this package. -const ReplicaClientType = "file" +// FileReplicaClientType is the client type for file replica clients. +const FileReplicaClientType = "file" -var _ litestream.ReplicaClient = (*ReplicaClient)(nil) +var _ ReplicaClient = (*FileReplicaClient)(nil) -// ReplicaClient is a client for writing snapshots & WAL segments to disk. -type ReplicaClient struct { +// FileReplicaClient is a client for writing snapshots & WAL segments to disk. +type FileReplicaClient struct { path string // destination path - Replica *litestream.Replica + // File info + FileMode os.FileMode + DirMode os.FileMode + Uid, Gid int } -// NewReplicaClient returns a new instance of ReplicaClient. -func NewReplicaClient(path string) *ReplicaClient { - return &ReplicaClient{ +// NewFileReplicaClient returns a new instance of FileReplicaClient. +func NewFileReplicaClient(path string) *FileReplicaClient { + return &FileReplicaClient{ path: path, - } -} -// db returns the database, if available. 
-func (c *ReplicaClient) db() *litestream.DB { - if c.Replica == nil { - return nil + FileMode: 0600, + DirMode: 0700, } - return c.Replica.DB() } // Type returns "file" as the client type. -func (c *ReplicaClient) Type() string { - return ReplicaClientType +func (c *FileReplicaClient) Type() string { + return FileReplicaClientType } // Path returns the destination path to replicate the database to. -func (c *ReplicaClient) Path() string { +func (c *FileReplicaClient) Path() string { return c.path } // GenerationsDir returns the path to a generation root directory. -func (c *ReplicaClient) GenerationsDir() (string, error) { +func (c *FileReplicaClient) GenerationsDir() (string, error) { if c.path == "" { return "", fmt.Errorf("file replica path required") } @@ -60,7 +57,7 @@ func (c *ReplicaClient) GenerationsDir() (string, error) { } // GenerationDir returns the path to a generation's root directory. -func (c *ReplicaClient) GenerationDir(generation string) (string, error) { +func (c *FileReplicaClient) GenerationDir(generation string) (string, error) { dir, err := c.GenerationsDir() if err != nil { return "", err @@ -71,7 +68,7 @@ func (c *ReplicaClient) GenerationDir(generation string) (string, error) { } // SnapshotsDir returns the path to a generation's snapshot directory. -func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) { +func (c *FileReplicaClient) SnapshotsDir(generation string) (string, error) { dir, err := c.GenerationDir(generation) if err != nil { return "", err @@ -80,16 +77,16 @@ func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) { } // SnapshotPath returns the path to an uncompressed snapshot file. -func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) { +func (c *FileReplicaClient) SnapshotPath(generation string, index int) (string, error) { dir, err := c.SnapshotsDir(generation) if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatIndex(index)+".snapshot.lz4"), nil + return filepath.Join(dir, FormatIndex(index)+".snapshot.lz4"), nil } // WALDir returns the path to a generation's WAL directory -func (c *ReplicaClient) WALDir(generation string) (string, error) { +func (c *FileReplicaClient) WALDir(generation string) (string, error) { dir, err := c.GenerationDir(generation) if err != nil { return "", err @@ -98,16 +95,16 @@ func (c *ReplicaClient) WALDir(generation string) (string, error) { } // WALSegmentPath returns the path to a WAL segment file. -func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { +func (c *FileReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { dir, err := c.WALDir(generation) if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil + return filepath.Join(dir, FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil } // Generations returns a list of available generation names. 
-func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { +func (c *FileReplicaClient) Generations(ctx context.Context) ([]string, error) { root, err := c.GenerationsDir() if err != nil { return nil, fmt.Errorf("cannot determine generations path: %w", err) @@ -122,7 +119,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { var generations []string for _, fi := range fis { - if !litestream.IsGenerationName(fi.Name()) { + if !IsGenerationName(fi.Name()) { continue } else if !fi.IsDir() { continue @@ -133,7 +130,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { } // DeleteGeneration deletes all snapshots & WAL segments within a generation. -func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { +func (c *FileReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { dir, err := c.GenerationDir(generation) if err != nil { return fmt.Errorf("cannot determine generation path: %w", err) @@ -146,7 +143,7 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) } // Snapshots returns an iterator over all available snapshots for a generation. -func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { +func (c *FileReplicaClient) Snapshots(ctx context.Context, generation string) (SnapshotIterator, error) { dir, err := c.SnapshotsDir(generation) if err != nil { return nil, err @@ -154,7 +151,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites f, err := os.Open(dir) if os.IsNotExist(err) { - return litestream.NewSnapshotInfoSliceIterator(nil), nil + return NewSnapshotInfoSliceIterator(nil), nil } else if err != nil { return nil, err } @@ -166,7 +163,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites } // Iterate over every file and convert to metadata. - infos := make([]litestream.SnapshotInfo, 0, len(fis)) + infos := make([]SnapshotInfo, 0, len(fis)) for _, fi := range fis { // Parse index from filename. index, err := internal.ParseSnapshotPath(filepath.Base(fi.Name())) @@ -174,7 +171,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites continue } - infos = append(infos, litestream.SnapshotInfo{ + infos = append(infos, SnapshotInfo{ Generation: generation, Index: index, Size: fi.Size(), @@ -182,30 +179,25 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites }) } - sort.Sort(litestream.SnapshotInfoSlice(infos)) + sort.Sort(SnapshotInfoSlice(infos)) - return litestream.NewSnapshotInfoSliceIterator(infos), nil + return NewSnapshotInfoSliceIterator(infos), nil } // WriteSnapshot writes LZ4 compressed data from rd into a file on disk. -func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { +func (c *FileReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info SnapshotInfo, err error) { filename, err := c.SnapshotPath(generation, index) if err != nil { return info, err } - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - // Ensure parent directory exists. 
- if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { return info, err } // Write snapshot to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", fileInfo) + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) if err != nil { return info, err } @@ -224,7 +216,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in if err != nil { return info, err } - info = litestream.SnapshotInfo{ + info = SnapshotInfo{ Generation: generation, Index: index, Size: fi.Size(), @@ -241,7 +233,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in // SnapshotReader returns a reader for snapshot data at the given generation/index. // Returns os.ErrNotExist if no matching index is found. -func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { +func (c *FileReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { filename, err := c.SnapshotPath(generation, index) if err != nil { return nil, err @@ -250,7 +242,7 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i } // DeleteSnapshot deletes a snapshot with the given generation & index. -func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { +func (c *FileReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { filename, err := c.SnapshotPath(generation, index) if err != nil { return fmt.Errorf("cannot determine snapshot path: %w", err) @@ -262,7 +254,7 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i } // WALSegments returns an iterator over all available WAL files for a generation. -func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { +func (c *FileReplicaClient) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) { dir, err := c.WALDir(generation) if err != nil { return nil, err @@ -270,7 +262,7 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit f, err := os.Open(dir) if os.IsNotExist(err) { - return litestream.NewWALSegmentInfoSliceIterator(nil), nil + return NewWALSegmentInfoSliceIterator(nil), nil } else if err != nil { return nil, err } @@ -284,7 +276,7 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit // Iterate over every file and convert to metadata. indexes := make([]int, 0, len(fis)) for _, fi := range fis { - index, err := litestream.ParseIndex(fi.Name()) + index, err := ParseIndex(fi.Name()) if err != nil || !fi.IsDir() { continue } @@ -293,28 +285,23 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit sort.Ints(indexes) - return newWALSegmentIterator(dir, generation, indexes), nil + return newFileWALSegmentIterator(dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. 
-func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { +func (c *FileReplicaClient) WriteWALSegment(ctx context.Context, pos Pos, rd io.Reader) (info WALSegmentInfo, err error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { return info, err } - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { return info, err } // Write WAL segment to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", fileInfo) + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) if err != nil { return info, err } @@ -333,7 +320,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, if err != nil { return info, err } - info = litestream.WALSegmentInfo{ + info = WALSegmentInfo{ Generation: pos.Generation, Index: pos.Index, Offset: pos.Offset, @@ -351,7 +338,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, // WALSegmentReader returns a reader for a section of WAL data at the given position. // Returns os.ErrNotExist if no matching index/offset is found. -func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { +func (c *FileReplicaClient) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { return nil, err @@ -360,7 +347,7 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos } // DeleteWALSegments deletes WAL segments at the given positions. -func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error { +func (c *FileReplicaClient) DeleteWALSegments(ctx context.Context, a []Pos) error { for _, pos := range a { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { @@ -373,28 +360,28 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po return nil } -type walSegmentIterator struct { +type fileWalSegmentIterator struct { dir string generation string indexes []int - infos []litestream.WALSegmentInfo + infos []WALSegmentInfo err error } -func newWALSegmentIterator(dir, generation string, indexes []int) *walSegmentIterator { - return &walSegmentIterator{ +func newFileWALSegmentIterator(dir, generation string, indexes []int) *fileWalSegmentIterator { + return &fileWalSegmentIterator{ dir: dir, generation: generation, indexes: indexes, } } -func (itr *walSegmentIterator) Close() (err error) { +func (itr *fileWalSegmentIterator) Close() (err error) { return itr.err } -func (itr *walSegmentIterator) Next() bool { +func (itr *fileWalSegmentIterator) Next() bool { // Exit if an error has already occurred. if itr.err != nil { return false @@ -416,7 +403,7 @@ func (itr *walSegmentIterator) Next() bool { // Read segments into a cache for the current index. 
index := itr.indexes[0] itr.indexes = itr.indexes[1:] - f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index))) + f, err := os.Open(filepath.Join(itr.dir, FormatIndex(index))) if err != nil { itr.err = err return false @@ -438,12 +425,12 @@ func (itr *walSegmentIterator) Next() bool { continue } - offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + offset, err := ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) if err != nil { continue } - itr.infos = append(itr.infos, litestream.WALSegmentInfo{ + itr.infos = append(itr.infos, WALSegmentInfo{ Generation: itr.generation, Index: index, Offset: offset, @@ -453,7 +440,7 @@ func (itr *walSegmentIterator) Next() bool { } // Ensure segments are sorted within index. - sort.Sort(litestream.WALSegmentInfoSlice(itr.infos)) + sort.Sort(WALSegmentInfoSlice(itr.infos)) if len(itr.infos) > 0 { return true @@ -461,11 +448,11 @@ func (itr *walSegmentIterator) Next() bool { } } -func (itr *walSegmentIterator) Err() error { return itr.err } +func (itr *fileWalSegmentIterator) Err() error { return itr.err } -func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo { +func (itr *fileWalSegmentIterator) WALSegment() WALSegmentInfo { if len(itr.infos) == 0 { - return litestream.WALSegmentInfo{} + return WALSegmentInfo{} } return itr.infos[0] } diff --git a/file/replica_client_test.go b/file_replica_client_test.go similarity index 54% rename from file/replica_client_test.go rename to file_replica_client_test.go index 465e8357..65dcb111 100644 --- a/file/replica_client_test.go +++ b/file_replica_client_test.go @@ -1,34 +1,34 @@ -package file_test +package litestream_test import ( "testing" - "github.com/benbjohnson/litestream/file" + "github.com/benbjohnson/litestream" ) func TestReplicaClient_Path(t *testing.T) { - c := file.NewReplicaClient("/foo/bar") + c := litestream.NewFileReplicaClient("/foo/bar") if got, want := c.Path(), "/foo/bar"; got != want { t.Fatalf("Path()=%v, want %v", got, want) } } func TestReplicaClient_Type(t *testing.T) { - if got, want := file.NewReplicaClient("").Type(), "file"; got != want { + if got, want := litestream.NewFileReplicaClient("").Type(), "file"; got != want { t.Fatalf("Type()=%v, want %v", got, want) } } func TestReplicaClient_GenerationsDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationsDir(); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").GenerationsDir(); err != nil { t.Fatal(err) } else if want := "/foo/generations"; got != want { t.Fatalf("GenerationsDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) @@ -36,19 +36,19 @@ func TestReplicaClient_GenerationsDir(t *testing.T) { func TestReplicaClient_GenerationDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567"; got != want { t.Fatalf("GenerationDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := 
file.NewReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("expected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("expected error: %v", err) } }) @@ -56,19 +56,19 @@ func TestReplicaClient_GenerationDir(t *testing.T) { func TestReplicaClient_SnapshotsDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/snapshots"; got != want { t.Fatalf("SnapshotsDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -76,19 +76,19 @@ func TestReplicaClient_SnapshotsDir(t *testing.T) { func TestReplicaClient_SnapshotPath(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want { t.Fatalf("SnapshotPath()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -96,19 +96,19 @@ func TestReplicaClient_SnapshotPath(t *testing.T) { func TestReplicaClient_WALDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").WALDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").WALDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/wal"; got != want { t.Fatalf("WALDir()=%v, want %v", got, 
want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -116,19 +116,19 @@ func TestReplicaClient_WALDir(t *testing.T) { func TestReplicaClient_WALSegmentPath(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/wal/000003e8/000003e9.wal.lz4"; got != want { t.Fatalf("WALPath()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) diff --git a/integration/replica_client_test.go b/integration/replica_client_test.go new file mode 100644 index 00000000..109f4f39 --- /dev/null +++ b/integration/replica_client_test.go @@ -0,0 +1,566 @@ +package integration_test + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/abs" + "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/s3" + "github.com/benbjohnson/litestream/sftp" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var ( + // Enables integration tests. 
+ replicaType = flag.String("replica-type", "file", "") +) + +// S3 settings +var ( + // Replica client settings + s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") + s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") + s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") + s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") + s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") + s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") + s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") + s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") +) + +// Google cloud storage settings +var ( + gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") + gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "") +) + +// Azure blob storage settings +var ( + absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") + absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") + absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") + absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") +) + +// SFTP settings +var ( + sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") + sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") + sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") + sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") + sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") +) + +func TestReplicaClient_Generations(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { + t.Fatal(err) + } + + // Verify returned generations. + if got, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { + t.Fatalf("Generations()=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if generations, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := len(generations), 0; got != want { + t.Fatalf("len(Generations())=%v, want %v", got, want) + } + }) +} + +func TestReplicaClient_Snapshots(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. 
+ if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + // Fetch all snapshots by generation. + itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all snapshots into a slice so they can be sorted. + a, err := litestream.SliceSnapshotIterator(itr) + if err != nil { + t.Fatal(err) + } else if got, want := len(a), 2; got != want { + t.Fatalf("len=%v, want %v", got, want) + } + sort.Sort(litestream.SnapshotInfoSlice(a)) + + // Verify first snapshot metadata. + if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[0].Index, 5; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[0].Size, int64(1); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[0].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify second snapshot metadata. + if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[1].Index, 0xA; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[1].Size, int64(3); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Ensure close is clean. + if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no snapshots") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteSnapshot(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_SnapshotReader(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := 
c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foo"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { + t.Fatal(err) + } + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all WAL segment files into a slice so they can be sorted. + a, err := litestream.SliceWALSegmentIterator(itr) + if err != nil { + t.Fatal(err) + } else if got, want := len(a), 3; got != want { + t.Fatalf("len=%v, want %v", got, want) + } + sort.Sort(litestream.WALSegmentInfoSlice(a)) + + // Verify first WAL segment metadata. + if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[0].Index, 2; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[0].Offset, int64(0); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[0].Size, int64(5); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[0].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify second WAL segment metadata. + if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[1].Index, 2; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[1].Offset, int64(5); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[1].Size, int64(2); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify third WAL segment metadata.
+ if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[2].Index, 3; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[2].Offset, int64(0); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[2].Size, int64(3); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[2].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Ensure close is clean. + if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteWALSegment(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegmentReader(t *testing.T) { + + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foobar"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err
:= c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) +} + +func TestReplicaClient_DeleteWALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } + + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ + {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, + {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, + }); err != nil { + t.Fatal(err) + } + + if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +// RunWithReplicaClient executes fn with each replica specified by the -replica-type flag +func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { + t.Run(name, func(t *testing.T) { + for _, typ := range strings.Split(*replicaType, ",") { + t.Run(typ, func(t *testing.T) { + c := NewReplicaClient(t, typ) + defer MustDeleteAll(t, c) + + fn(t, c) + }) + } + }) +} + +// NewReplicaClient returns a new client for integration testing by type name. +func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { + tb.Helper() + + switch typ { + case litestream.FileReplicaClientType: + return litestream.NewFileReplicaClient(tb.TempDir()) + case s3.ReplicaClientType: + return NewS3ReplicaClient(tb) + case gcs.ReplicaClientType: + return NewGCSReplicaClient(tb) + case abs.ReplicaClientType: + return NewABSReplicaClient(tb) + case sftp.ReplicaClientType: + return NewSFTPReplicaClient(tb) + default: + tb.Fatalf("invalid replica client type: %q", typ) + return nil + } +} + +// NewS3ReplicaClient returns a new client for integration testing. +func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { + tb.Helper() + + c := s3.NewReplicaClient() + c.AccessKeyID = *s3AccessKeyID + c.SecretAccessKey = *s3SecretAccessKey + c.Region = *s3Region + c.Bucket = *s3Bucket + c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) + c.Endpoint = *s3Endpoint + c.ForcePathStyle = *s3ForcePathStyle + c.SkipVerify = *s3SkipVerify + return c +} + +// NewGCSReplicaClient returns a new client for integration testing. +func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { + tb.Helper() + + c := gcs.NewReplicaClient() + c.Bucket = *gcsBucket + c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewABSReplicaClient returns a new client for integration testing. 
+func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { + tb.Helper() + + c := abs.NewReplicaClient() + c.AccountName = *absAccountName + c.AccountKey = *absAccountKey + c.Bucket = *absBucket + c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewSFTPReplicaClient returns a new client for integration testing. +func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { + tb.Helper() + + c := sftp.NewReplicaClient() + c.Host = *sftpHost + c.User = *sftpUser + c.Password = *sftpPassword + c.KeyPath = *sftpKeyPath + c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// MustDeleteAll deletes all objects under the client's path. +func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { + tb.Helper() + + generations, err := c.Generations(context.Background()) + if err != nil { + tb.Fatalf("cannot list generations for deletion: %s", err) + } + + for _, generation := range generations { + if err := c.DeleteGeneration(context.Background(), generation); err != nil { + tb.Fatalf("cannot delete generation: %s", err) + } + } + + switch c := c.(type) { + case *sftp.ReplicaClient: + if err := c.Cleanup(context.Background()); err != nil { + tb.Fatalf("cannot cleanup sftp: %s", err) + } + } +} diff --git a/internal/internal.go b/internal/internal.go index 26d55aae..f8e5c60b 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -94,27 +94,19 @@ func (r *ReadCounter) Read(p []byte) (int, error) { func (r *ReadCounter) N() int64 { return r.n } // CreateFile creates the file and matches the mode & uid/gid of fi. -func CreateFile(filename string, fi os.FileInfo) (*os.File, error) { - mode := os.FileMode(0600) - if fi != nil { - mode = fi.Mode() - } - +func CreateFile(filename string, mode os.FileMode, uid, gid int) (*os.File, error) { f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return nil, err } - uid, gid := Fileinfo(fi) _ = f.Chown(uid, gid) return f, nil } // MkdirAll is a copy of os.MkdirAll() except that it attempts to set the // mode/uid/gid to match fi for each created directory. -func MkdirAll(path string, fi os.FileInfo) error { - uid, gid := Fileinfo(fi) - +func MkdirAll(path string, mode os.FileMode, uid, gid int) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { @@ -137,17 +129,13 @@ func MkdirAll(path string, fi os.FileInfo) error { if j > 1 { // Create parent. - err = MkdirAll(fixRootDirectory(path[:j-1]), fi) + err = MkdirAll(fixRootDirectory(path[:j-1]), mode, uid, gid) if err != nil { return err } } // Parent now exists; invoke Mkdir and use its result. - mode := os.FileMode(0700) - if fi != nil { - mode = fi.Mode() - } err = os.Mkdir(path, mode) if err != nil { // Handle arguments like "foo/." by diff --git a/litestream.go b/litestream.go index 46eb0338..e962f141 100644 --- a/litestream.go +++ b/litestream.go @@ -37,6 +37,7 @@ const ( var ( ErrNoGeneration = errors.New("no generation available") ErrNoSnapshots = errors.New("no snapshots available") + ErrNoWALSegments = errors.New("no wal segments available") ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") ) @@ -440,6 +441,20 @@ func ParseOffset(s string) (int64, error) { return v, nil } +// removeDBFiles deletes the database and related files (journal, shm, wal). 
+func removeDBFiles(filename string) error { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete database %q: %w", filename, err) + } else if err := os.Remove(filename + "-journal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete journal for %q: %w", filename, err) + } else if err := os.Remove(filename + "-shm"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete shared memory for %q: %w", filename, err) + } else if err := os.Remove(filename + "-wal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete wal for %q: %w", filename, err) + } + return nil +} + // isHexChar returns true if ch is a lowercase hex character. func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') diff --git a/litestream_test.go b/litestream_test.go index 93327df3..9878fbdf 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -1,12 +1,16 @@ package litestream_test import ( + "bytes" "encoding/binary" "encoding/hex" + "io" + "os" "testing" "github.com/benbjohnson/litestream" _ "github.com/mattn/go-sqlite3" + "github.com/pierrec/lz4/v4" ) func TestChecksum(t *testing.T) { @@ -26,14 +30,14 @@ func TestChecksum(t *testing.T) { // Ensure we get the same result as OnePass even if we split up into multiple calls. t.Run("Incremental", func(t *testing.T) { // Compute checksum for beginning of WAL header. - s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, MustDecodeHexString("377f0682002de218000010000000000052382eac857b1a4e")) + s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, decodeHexString(t, "377f0682002de218000010000000000052382eac857b1a4e")) if got, want := [2]uint32{s0, s1}, [2]uint32{0x81153b65, 0x87178e8f}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } // Continue checksum with WAL frame header & frame contents. 
- s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, MustDecodeHexString("0000000200000002")) - s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, MustDecodeHexString(`0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) + s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, decodeHexString(t, "0000000200000002")) + s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, decodeHexString(t, `0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) if got, want := [2]uint32{s0b, s1b}, [2]uint32{0xdc2f3e84, 0x540488d3}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } @@ -50,10 +54,52 @@ func TestFindMinSnapshotByGeneration(t *testing.T) { } } -func MustDecodeHexString(s string) []byte { +func decodeHexString(tb testing.TB, s string) []byte { + tb.Helper() + b, err := hex.DecodeString(s) if err != nil { - panic(err) + tb.Fatal(err) } return b } + +// fileEqual returns true if files at x and y have equal contents. 
+func fileEqual(tb testing.TB, x, y string) bool { + tb.Helper() + + bx, err := os.ReadFile(x) + if err != nil { + tb.Fatal(err) + } + + by, err := os.ReadFile(y) + if err != nil { + tb.Fatal(err) + } + + return bytes.Equal(bx, by) +} + +func compressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(b); err != nil { + tb.Fatal(err) + } else if err := zw.Close(); err != nil { + tb.Fatal(err) + } + return buf.Bytes() +} + +func decompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + buf, err := io.ReadAll(lz4.NewReader(bytes.NewReader(b))) + if err != nil { + tb.Fatal(err) + } + return buf +} diff --git a/mock/read_closer.go b/mock/read_closer.go new file mode 100644 index 00000000..a473e96e --- /dev/null +++ b/mock/read_closer.go @@ -0,0 +1,14 @@ +package mock + +type ReadCloser struct { + CloseFunc func() error + ReadFunc func([]byte) (int, error) +} + +func (r *ReadCloser) Close() error { + return r.CloseFunc() +} + +func (r *ReadCloser) Read(b []byte) (int, error) { + return r.ReadFunc(b) +} diff --git a/mock/snapshot_iterator.go b/mock/snapshot_iterator.go new file mode 100644 index 00000000..8f167d68 --- /dev/null +++ b/mock/snapshot_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type SnapshotIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + SnapshotFunc func() litestream.SnapshotInfo +} + +func (itr *SnapshotIterator) Close() error { + return itr.CloseFunc() +} + +func (itr *SnapshotIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *SnapshotIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *SnapshotIterator) Snapshot() litestream.SnapshotInfo { + return itr.SnapshotFunc() +} diff --git a/mock/wal_segment_iterator.go b/mock/wal_segment_iterator.go new file mode 100644 index 00000000..f1d62cd3 --- /dev/null +++ b/mock/wal_segment_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type WALSegmentIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + WALSegmentFunc func() litestream.WALSegmentInfo +} + +func (itr *WALSegmentIterator) Close() error { + return itr.CloseFunc() +} + +func (itr *WALSegmentIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *WALSegmentIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *WALSegmentIterator) WALSegment() litestream.WALSegmentInfo { + return itr.WALSegmentFunc() +} diff --git a/replica.go b/replica.go index c6d0f3f4..67e9d141 100644 --- a/replica.go +++ b/replica.go @@ -7,14 +7,12 @@ import ( "io" "io/ioutil" "log" - "math" "os" "path/filepath" "sort" "sync" "time" - "github.com/benbjohnson/litestream/internal" "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -144,6 +142,15 @@ func (r *Replica) Stop(hard bool) (err error) { return err } +// logPrefix returns the prefix used when logging from the replica. +// This includes the replica name as well as the database path, if available. +func (r *Replica) logPrefix() string { + if db := r.DB(); db != nil { + return fmt.Sprintf("%s(%s): ", db.Path(), r.Name()) + } + return r.Name() + ": " +} + // Sync copies new WAL frames from the shadow WAL to the replica client. func (r *Replica) Sync(ctx context.Context) (err error) { // Clear last position if if an error occurs during sync. 
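For reference, a minimal sketch (not part of this patch) of how the stub iterators in the new mock package might be wired up in a unit test; the test name and stubbed values below are illustrative assumptions only, following the got/want style used elsewhere in the test suite:

package litestream_test

import (
	"testing"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/mock"
)

func TestSnapshotIteratorStub(t *testing.T) {
	calls := 0
	itr := &mock.SnapshotIterator{
		CloseFunc: func() error { return nil },
		ErrFunc:   func() error { return nil },
		// Yield exactly one snapshot, then report exhaustion.
		NextFunc: func() bool { calls++; return calls == 1 },
		SnapshotFunc: func() litestream.SnapshotInfo {
			return litestream.SnapshotInfo{Generation: "0000000000000000", Index: 0, Size: 3}
		},
	}

	if !itr.Next() {
		t.Fatal("expected one snapshot")
	} else if got, want := itr.Snapshot().Generation, "0000000000000000"; got != want {
		t.Fatalf("Generation=%v, want %v", got, want)
	} else if itr.Next() {
		t.Fatal("expected iterator to be exhausted")
	} else if err := itr.Close(); err != nil {
		t.Fatal(err)
	}
}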
@@ -766,14 +773,18 @@ func (r *Replica) Validate(ctx context.Context) error { return fmt.Errorf("cannot wait for replica: %w", err) } + // Find lastest snapshot that occurs before the index. + snapshotIndex, err := FindSnapshotForIndex(ctx, r.Client, pos.Generation, pos.Index-1) + if err != nil { + return fmt.Errorf("cannot find snapshot index: %w", err) + } + restorePath := filepath.Join(tmpdir, "replica") - if err := r.Restore(ctx, RestoreOptions{ - OutputPath: restorePath, - ReplicaName: r.Name(), - Generation: pos.Generation, - Index: pos.Index - 1, - Logger: log.New(os.Stderr, "", 0), - }); err != nil { + opt := RestoreOptions{ + Logger: log.New(os.Stderr, "", 0), + LogPrefix: r.logPrefix(), + } + if err := Restore(ctx, r.Client, restorePath, pos.Generation, snapshotIndex, pos.Index-1, opt); err != nil { return fmt.Errorf("cannot restore: %w", err) } @@ -883,295 +894,6 @@ func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (t return min, itr.Close() } -// GenerationTimeBounds returns the creation time & last updated time of a generation. -// Returns zero time if no snapshots or WAL segments exist. -func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (createdAt, updatedAt time.Time, err error) { - // Iterate over snapshots. - sitr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer sitr.Close() - - for sitr.Next() { - info := sitr.Snapshot() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := sitr.Close(); err != nil { - return createdAt, updatedAt, err - } - - // Iterate over WAL segments. - witr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer witr.Close() - - for witr.Next() { - info := witr.WALSegment() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := witr.Close(); err != nil { - return createdAt, updatedAt, err - } - - return createdAt, updatedAt, nil -} - -// CalcRestoreTarget returns a generation to restore from. -func (r *Replica) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (generation string, updatedAt time.Time, err error) { - var target struct { - generation string - updatedAt time.Time - } - - generations, err := r.Client.Generations(ctx) - if err != nil { - return "", time.Time{}, fmt.Errorf("cannot fetch generations: %w", err) - } - - // Search generations for one that contains the requested timestamp. - for _, generation := range generations { - // Skip generation if it does not match filter. - if opt.Generation != "" && generation != opt.Generation { - continue - } - - // Determine the time bounds for the generation. - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) - if err != nil { - return "", time.Time{}, fmt.Errorf("generation created at: %w", err) - } - - // Skip if it does not contain timestamp. - if !opt.Timestamp.IsZero() { - if opt.Timestamp.Before(createdAt) || opt.Timestamp.After(updatedAt) { - continue - } - } - - // Use the latest replica if we have multiple candidates. 
- if !updatedAt.After(target.updatedAt) { - continue - } - - target.generation = generation - target.updatedAt = updatedAt - } - - return target.generation, target.updatedAt, nil -} - -// Replica restores the database from a replica based on the options given. -// This method will restore into opt.OutputPath, if specified, or into the -// DB's original database path. It can optionally restore from a specific -// replica or generation or it will automatically choose the best one. Finally, -// a timestamp can be specified to restore the database to a specific -// point-in-time. -func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) { - // Validate options. - if opt.OutputPath == "" { - if r.db.path == "" { - return fmt.Errorf("output path required") - } - opt.OutputPath = r.db.path - } else if opt.Generation == "" && opt.Index != math.MaxInt32 { - return fmt.Errorf("must specify generation when restoring to index") - } else if opt.Index != math.MaxInt32 && !opt.Timestamp.IsZero() { - return fmt.Errorf("cannot specify index & timestamp to restore") - } - - // Ensure logger exists. - logger := opt.Logger - if logger == nil { - logger = log.New(ioutil.Discard, "", 0) - } - - logPrefix := r.Name() - if db := r.DB(); db != nil { - logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name()) - } - - // Ensure output path does not already exist. - if _, err := os.Stat(opt.OutputPath); err == nil { - return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath) - } else if err != nil && !os.IsNotExist(err) { - return err - } - - // Find lastest snapshot that occurs before timestamp or index. - var minWALIndex int - if opt.Index < math.MaxInt32 { - if minWALIndex, err = r.SnapshotIndexByIndex(ctx, opt.Generation, opt.Index); err != nil { - return fmt.Errorf("cannot find snapshot index: %w", err) - } - } else { - if minWALIndex, err = r.SnapshotIndexAt(ctx, opt.Generation, opt.Timestamp); err != nil { - return fmt.Errorf("cannot find snapshot index by timestamp: %w", err) - } - } - - // Compute list of offsets for each WAL index. - walSegmentMap, err := r.walSegmentMap(ctx, opt.Generation, opt.Index, opt.Timestamp) - if err != nil { - return fmt.Errorf("cannot find max wal index for restore: %w", err) - } - - // Find the maximum WAL index that occurs before timestamp. - maxWALIndex := -1 - for index := range walSegmentMap { - if index > maxWALIndex { - maxWALIndex = index - } - } - - // Ensure that we found the specific index, if one was specified. - if opt.Index != math.MaxInt32 && opt.Index != opt.Index { - return fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", opt.Index, opt.Generation, maxWALIndex) - } - - // If no WAL files were found, mark this as a snapshot-only restore. - snapshotOnly := maxWALIndex == -1 - - // Initialize starting position. - pos := Pos{Generation: opt.Generation, Index: minWALIndex} - tmpPath := opt.OutputPath + ".tmp" - - // Copy snapshot to output path. - logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath) - if err := r.restoreSnapshot(ctx, pos.Generation, pos.Index, tmpPath); err != nil { - return fmt.Errorf("cannot restore snapshot: %w", err) - } - - // If no WAL files available, move snapshot to final path & exit early. - if snapshotOnly { - logger.Printf("%s: snapshot only, finalizing database", logPrefix) - return os.Rename(tmpPath, opt.OutputPath) - } - - // Begin processing WAL files. 
- logger.Printf("%s: restoring wal files: generation=%s index=[%08x,%08x]", logPrefix, opt.Generation, minWALIndex, maxWALIndex) - - // Fill input channel with all WAL indexes to be loaded in order. - // Verify every index has at least one offset. - ch := make(chan int, maxWALIndex-minWALIndex+1) - for index := minWALIndex; index <= maxWALIndex; index++ { - if len(walSegmentMap[index]) == 0 { - return fmt.Errorf("missing WAL index: %s/%08x", opt.Generation, index) - } - ch <- index - } - close(ch) - - // Track load state for each WAL. - var mu sync.Mutex - cond := sync.NewCond(&mu) - walStates := make([]walRestoreState, maxWALIndex-minWALIndex+1) - - parallelism := opt.Parallelism - if parallelism < 1 { - parallelism = 1 - } - - // Download WAL files to disk in parallel. - g, ctx := errgroup.WithContext(ctx) - for i := 0; i < parallelism; i++ { - g.Go(func() error { - for { - select { - case <-ctx.Done(): - cond.Broadcast() - return err - case index, ok := <-ch: - if !ok { - cond.Broadcast() - return nil - } - - startTime := time.Now() - - err := r.downloadWAL(ctx, opt.Generation, index, walSegmentMap[index], tmpPath) - if err != nil { - err = fmt.Errorf("cannot download wal %s/%08x: %w", opt.Generation, index, err) - } - - // Mark index as ready-to-apply and notify applying code. - mu.Lock() - walStates[index-minWALIndex] = walRestoreState{ready: true, err: err} - mu.Unlock() - cond.Broadcast() - - // Returning the error here will cancel the other goroutines. - if err != nil { - return err - } - - logger.Printf("%s: downloaded wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - } - }) - } - - // Apply WAL files in order as they are ready. - for index := minWALIndex; index <= maxWALIndex; index++ { - // Wait until next WAL file is ready to apply. - mu.Lock() - for !walStates[index-minWALIndex].ready { - if err := ctx.Err(); err != nil { - return err - } - cond.Wait() - } - if err := walStates[index-minWALIndex].err; err != nil { - return err - } - mu.Unlock() - - // Apply WAL to database file. - startTime := time.Now() - if err = applyWAL(ctx, index, tmpPath); err != nil { - return fmt.Errorf("cannot apply wal: %w", err) - } - logger.Printf("%s: applied wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - - // Ensure all goroutines finish. All errors should have been handled during - // the processing of WAL files but this ensures that all processing is done. - if err := g.Wait(); err != nil { - return err - } - - // Copy file to final location. - logger.Printf("%s: renaming database from temporary location", logPrefix) - if err := os.Rename(tmpPath, opt.OutputPath); err != nil { - return err - } - - return nil -} - -type walRestoreState struct { - ready bool - err error -} - // SnapshotIndexAt returns the highest index for a snapshot within a generation // that occurs before timestamp. If timestamp is zero, returns the latest snapshot. func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) { @@ -1202,137 +924,19 @@ func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timest return snapshotIndex, nil } -// SnapshotIndexbyIndex returns the highest index for a snapshot within a generation -// that occurs before a given index. If index is MaxInt32, returns the latest snapshot. 
-func (r *Replica) SnapshotIndexByIndex(ctx context.Context, generation string, index int) (int, error) { - itr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return 0, err - } - defer itr.Close() - - snapshotIndex := -1 - for itr.Next() { - snapshot := itr.Snapshot() - - if index < math.MaxInt32 && snapshot.Index > index { - continue // after index, skip - } - - // Use snapshot if it newer. - if snapshotIndex == -1 || snapshotIndex >= snapshotIndex { - snapshotIndex = snapshot.Index - } - } - if err := itr.Close(); err != nil { - return 0, err - } else if snapshotIndex == -1 { - return 0, ErrNoSnapshots - } - return snapshotIndex, nil -} - -// walSegmentMap returns a map of WAL indices to their segments. -// Filters by a max timestamp or a max index. -func (r *Replica) walSegmentMap(ctx context.Context, generation string, maxIndex int, maxTimestamp time.Time) (map[int][]int64, error) { - itr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return nil, err - } - defer itr.Close() - - m := make(map[int][]int64) - for itr.Next() { - info := itr.WALSegment() - - // Exit if we go past the max timestamp or index. - if !maxTimestamp.IsZero() && info.CreatedAt.After(maxTimestamp) { - break // after max timestamp, skip - } else if info.Index > maxIndex { - break // after max index, skip - } - - // Verify offsets are added in order. - offsets := m[info.Index] - if len(offsets) == 0 && info.Offset != 0 { - return nil, fmt.Errorf("missing initial wal segment: generation=%s index=%08x offset=%d", generation, info.Index, info.Offset) - } else if len(offsets) > 0 && offsets[len(offsets)-1] >= info.Offset { - return nil, fmt.Errorf("wal segments out of order: generation=%s index=%08x offsets=(%d,%d)", generation, info.Index, offsets[len(offsets)-1], info.Offset) - } - - // Append to the end of the WAL file. - m[info.Index] = append(offsets, info.Offset) - } - return m, itr.Close() -} - -// restoreSnapshot copies a snapshot from the replica to a file. -func (r *Replica) restoreSnapshot(ctx context.Context, generation string, index int, filename string) error { - // Determine the user/group & mode based on the DB, if available. - var fileInfo, dirInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo, dirInfo = db.fileInfo, db.dirInfo - } - - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { - return err - } - - f, err := internal.CreateFile(filename, fileInfo) - if err != nil { - return err - } - defer f.Close() - - rd, err := r.Client.SnapshotReader(ctx, generation, index) - if err != nil { - return err - } - defer rd.Close() - - if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } - return f.Close() -} - -// downloadWAL copies a WAL file from the replica to a local copy next to the DB. -// The WAL is later applied by applyWAL(). This function can be run in parallel -// to download multiple WAL files simultaneously. -func (r *Replica) downloadWAL(ctx context.Context, generation string, index int, offsets []int64, dbPath string) (err error) { - // Determine the user/group & mode based on the DB, if available. - var fileInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo = db.fileInfo - } - - // Open readers for every segment in the WAL file, in order. 
- var readers []io.Reader - for _, offset := range offsets { - rd, err := r.Client.WALSegmentReader(ctx, Pos{Generation: generation, Index: index, Offset: offset}) +// LatestReplica returns the most recently updated replica. +func LatestReplica(ctx context.Context, replicas []*Replica) (*Replica, error) { + var t time.Time + var r *Replica + for i := range replicas { + _, max, err := ReplicaClientTimeBounds(ctx, replicas[i].Client) if err != nil { - return err + return nil, err + } else if r == nil || max.After(t) { + r, t = replicas[i], max } - defer rd.Close() - readers = append(readers, lz4.NewReader(rd)) - } - - // Open handle to destination WAL path. - f, err := internal.CreateFile(fmt.Sprintf("%s-%08x-wal", dbPath, index), fileInfo) - if err != nil { - return err } - defer f.Close() - - // Combine segments together and copy WAL to target path. - if _, err := io.Copy(f, io.MultiReader(readers...)); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - return nil + return r, nil } // Replica metrics. diff --git a/replica_client.go b/replica_client.go index 3a914e47..3bf01b17 100644 --- a/replica_client.go +++ b/replica_client.go @@ -2,9 +2,19 @@ package litestream import ( "context" + "fmt" "io" + "log" + "os" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" ) +// DefaultRestoreParallelism is the default parallelism when downloading WAL files. +const DefaultRestoreParallelism = 8 + // ReplicaClient represents client to connect to a Replica. type ReplicaClient interface { // Returns the type of client. @@ -46,3 +56,382 @@ type ReplicaClient interface { // WAL segment does not exist. WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) } + +// FindSnapshotForIndex returns the highest index for a snapshot within a +// generation that occurs before a given index. +func FindSnapshotForIndex(ctx context.Context, client ReplicaClient, generation string, index int) (int, error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer itr.Close() + + // Iterate over all snapshots to find the closest to our given index. + snapshotIndex := -1 + var n int + for ; itr.Next(); n++ { + info := itr.Snapshot() + if info.Index > index { + continue // after given index, skip + } + + // Use snapshot if it's more recent. + if info.Index >= snapshotIndex { + snapshotIndex = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Ensure we find at least one snapshot and that it's before the given index. + if n == 0 { + return 0, ErrNoSnapshots + } else if snapshotIndex == -1 { + return 0, fmt.Errorf("no snapshots available at or before index %08x", index) + } + return snapshotIndex, nil +} + +// GenerationTimeBounds returns the creation time & last updated time of a generation. +// Returns ErrNoSnapshots if no data exists for the generation. +func GenerationTimeBounds(ctx context.Context, client ReplicaClient, generation string) (createdAt, updatedAt time.Time, err error) { + // Determine bounds for snapshots only first. + // This will return ErrNoSnapshots if no snapshots exist. + if createdAt, updatedAt, err = SnapshotTimeBounds(ctx, client, generation); err != nil { + return createdAt, updatedAt, err + } + + // Update ending time bounds if WAL segments exist after the last snapshot. 
+	_, max, err := WALTimeBounds(ctx, client, generation)
+	if err != nil && err != ErrNoWALSegments {
+		return createdAt, updatedAt, err
+	} else if max.After(updatedAt) {
+		updatedAt = max
+	}
+
+	return createdAt, updatedAt, nil
+}
+
+// SnapshotTimeBounds returns the minimum and maximum snapshot timestamps within a generation.
+// Returns ErrNoSnapshots if no data exists for the generation.
+func SnapshotTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) {
+	itr, err := client.Snapshots(ctx, generation)
+	if err != nil {
+		return min, max, fmt.Errorf("snapshots: %w", err)
+	}
+	defer itr.Close()
+
+	// Iterate over all snapshots to find the oldest and newest.
+	var n int
+	for ; itr.Next(); n++ {
+		info := itr.Snapshot()
+		if min.IsZero() || info.CreatedAt.Before(min) {
+			min = info.CreatedAt
+		}
+		if max.IsZero() || info.CreatedAt.After(max) {
+			max = info.CreatedAt
+		}
+	}
+	if err := itr.Close(); err != nil {
+		return min, max, fmt.Errorf("snapshot iteration: %w", err)
+	}
+
+	// Return error if no snapshots exist.
+	if n == 0 {
+		return min, max, ErrNoSnapshots
+	}
+	return min, max, nil
+}
+
+// WALTimeBounds returns the minimum and maximum WAL segment timestamps within a generation.
+// Returns ErrNoWALSegments if no data exists for the generation.
+func WALTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) {
+	itr, err := client.WALSegments(ctx, generation)
+	if err != nil {
+		return min, max, fmt.Errorf("wal segments: %w", err)
+	}
+	defer itr.Close()
+
+	// Iterate over all WAL segments to find the oldest and newest.
+	var n int
+	for ; itr.Next(); n++ {
+		info := itr.WALSegment()
+		if min.IsZero() || info.CreatedAt.Before(min) {
+			min = info.CreatedAt
+		}
+		if max.IsZero() || info.CreatedAt.After(max) {
+			max = info.CreatedAt
+		}
+	}
+	if err := itr.Close(); err != nil {
+		return min, max, fmt.Errorf("wal segment iteration: %w", err)
+	}
+
+	if n == 0 {
+		return min, max, ErrNoWALSegments
+	}
+	return min, max, nil
+}
+
+// FindLatestGeneration returns the most recent generation for a client.
+func FindLatestGeneration(ctx context.Context, client ReplicaClient) (generation string, err error) {
+	generations, err := client.Generations(ctx)
+	if err != nil {
+		return "", fmt.Errorf("generations: %w", err)
+	}
+
+	// Search generations for the most recently updated one.
+	var maxTime time.Time
+	for i := range generations {
+		// Determine the latest update for the generation.
+		_, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i])
+		if err != nil {
+			return "", fmt.Errorf("generation time bounds: %w", err)
+		}
+
+		// Use the latest generation if we have multiple candidates.
+		if updatedAt.After(maxTime) {
+			maxTime = updatedAt
+			generation = generations[i]
+		}
+	}
+
+	if generation == "" {
+		return "", ErrNoGeneration
+	}
+	return generation, nil
+}
+
+// ReplicaClientTimeBounds returns the time range covered by a replica client
+// across all generations. It scans the time range of each generation and
+// computes the overall lower and upper bounds.
+func ReplicaClientTimeBounds(ctx context.Context, client ReplicaClient) (min, max time.Time, err error) {
+	generations, err := client.Generations(ctx)
+	if err != nil {
+		return min, max, fmt.Errorf("generations: %w", err)
+	} else if len(generations) == 0 {
+		return min, max, ErrNoGeneration
+	}
+
+	// Iterate over generations to determine outer bounds.
+	for i := range generations {
+		// Determine the time range for the generation.
+		createdAt, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i])
+		if err != nil {
+			return min, max, fmt.Errorf("generation time bounds: %w", err)
+		}
+
+		// Update time bounds.
+		if min.IsZero() || createdAt.Before(min) {
+			min = createdAt
+		}
+		if max.IsZero() || updatedAt.After(max) {
+			max = updatedAt
+		}
+	}
+
+	return min, max, nil
+}
+
+// FindMaxIndexByGeneration returns the last index within a generation.
+// Returns ErrNoSnapshots if no index exists on the replica for the generation.
+func FindMaxIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) {
+	// Determine the highest available snapshot index. Returns an error if no
+	// snapshots are available, as WALs are not useful without snapshots.
+	snapshotIndex, err := FindMaxSnapshotIndexByGeneration(ctx, client, generation)
+	if err == ErrNoSnapshots {
+		return index, err
+	} else if err != nil {
+		return index, fmt.Errorf("max snapshot index: %w", err)
+	}
+
+	// Determine the highest available WAL index.
+	walIndex, err := FindMaxWALIndexByGeneration(ctx, client, generation)
+	if err != nil && err != ErrNoWALSegments {
+		return index, fmt.Errorf("max wal index: %w", err)
+	}
+
+	// Use snapshot index if it's after the last WAL index.
+	if snapshotIndex > walIndex {
+		return snapshotIndex, nil
+	}
+	return walIndex, nil
+}
+
+// FindMaxSnapshotIndexByGeneration returns the last snapshot index within a generation.
+// Returns ErrNoSnapshots if no snapshots exist for the generation on the replica.
+func FindMaxSnapshotIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) {
+	itr, err := client.Snapshots(ctx, generation)
+	if err != nil {
+		return 0, fmt.Errorf("snapshots: %w", err)
+	}
+	defer func() { _ = itr.Close() }()
+
+	// Iterate over snapshots to find the highest index.
+	var n int
+	for ; itr.Next(); n++ {
+		if info := itr.Snapshot(); info.Index > index {
+			index = info.Index
+		}
+	}
+	if err := itr.Close(); err != nil {
+		return 0, fmt.Errorf("snapshot iteration: %w", err)
+	}
+
+	// Return an error if no snapshots were found.
+	if n == 0 {
+		return 0, ErrNoSnapshots
+	}
+	return index, nil
+}
+
+// FindMaxWALIndexByGeneration returns the last WAL index within a generation.
+// Returns ErrNoWALSegments if no segments exist for the generation on the replica.
+func FindMaxWALIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) {
+	itr, err := client.WALSegments(ctx, generation)
+	if err != nil {
+		return 0, fmt.Errorf("wal segments: %w", err)
+	}
+	defer func() { _ = itr.Close() }()
+
+	// Iterate over WAL segments to find the highest index.
+	var n int
+	for ; itr.Next(); n++ {
+		if info := itr.WALSegment(); info.Index > index {
+			index = info.Index
+		}
+	}
+	if err := itr.Close(); err != nil {
+		return 0, fmt.Errorf("wal segment iteration: %w", err)
+	}
+
+	// Return an error if no WAL segments were found.
+	if n == 0 {
+		return 0, ErrNoWALSegments
+	}
+	return index, nil
+}
+
+// Restore restores the database to the given index on a generation.
+func Restore(ctx context.Context, client ReplicaClient, filename, generation string, snapshotIndex, targetIndex int, opt RestoreOptions) (err error) {
+	// Validate options.
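+	// A missing path, generation, or index is rejected up front. After
+	// validation, Restore copies the snapshot at snapshotIndex to a temporary
+	// file, downloads and applies every WAL index up to targetIndex, and then
+	// renames the temporary file to the requested output path.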
+	if filename == "" {
+		return fmt.Errorf("restore path required")
+	} else if generation == "" {
+		return fmt.Errorf("generation required")
+	} else if snapshotIndex < 0 {
+		return fmt.Errorf("snapshot index required")
+	} else if targetIndex < 0 {
+		return fmt.Errorf("target index required")
+	}
+
+	// Require a default level of parallelism.
+	if opt.Parallelism < 1 {
+		opt.Parallelism = DefaultRestoreParallelism
+	}
+
+	// Ensure logger exists.
+	logger := opt.Logger
+	if logger == nil {
+		logger = log.New(io.Discard, "", 0)
+	}
+
+	// Ensure output path does not already exist.
+	// If it doesn't exist, also remove any journal, shm, & wal files left behind.
+	if _, err := os.Stat(filename); err == nil {
+		return fmt.Errorf("cannot restore, output path already exists: %s", filename)
+	} else if err != nil && !os.IsNotExist(err) {
+		return err
+	} else if err := removeDBFiles(filename); err != nil {
+		return err
+	}
+
+	// Copy snapshot to output path.
+	tmpPath := filename + ".tmp"
+	logger.Printf("%srestoring snapshot %s/%08x to %s", opt.LogPrefix, generation, snapshotIndex, tmpPath)
+	if err := RestoreSnapshot(ctx, client, tmpPath, generation, snapshotIndex, opt.Mode, opt.Uid, opt.Gid); err != nil {
+		return fmt.Errorf("cannot restore snapshot: %w", err)
+	}
+
+	// Download & apply all WAL files between the snapshot & the target index.
+	d := NewWALDownloader(client, tmpPath, generation, snapshotIndex, targetIndex)
+	d.Parallelism = opt.Parallelism
+	d.Mode = opt.Mode
+	d.Uid, d.Gid = opt.Uid, opt.Gid
+
+	for {
+		// Read next WAL file from downloader.
+		walIndex, walPath, err := d.Next(ctx)
+		if err == io.EOF {
+			break
+		}
+
+		// If we are only reading a single index, a WAL file may not be found.
+		if _, ok := err.(*WALNotFoundError); ok && snapshotIndex == targetIndex {
+			logger.Printf("%sno wal files found, snapshot only", opt.LogPrefix)
+			break
+		} else if err != nil {
+			return fmt.Errorf("cannot download WAL: %w", err)
+		}
+
+		// Apply WAL file.
+		startTime := time.Now()
+		if err = ApplyWAL(ctx, tmpPath, walPath); err != nil {
+			return fmt.Errorf("cannot apply wal: %w", err)
+		}
+		logger.Printf("%sapplied wal %s/%08x elapsed=%s", opt.LogPrefix, generation, walIndex, time.Since(startTime).String())
+	}
+
+	// Copy file to final location.
+	logger.Printf("%srenaming database from temporary location", opt.LogPrefix)
+	if err := os.Rename(tmpPath, filename); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RestoreOptions represents options for Restore().
+type RestoreOptions struct {
+	// File info used for restored snapshot & WAL files.
+	Mode     os.FileMode
+	Uid, Gid int
+
+	// Specifies how many WAL files are downloaded in parallel during restore.
+	Parallelism int
+
+	// Logging settings.
+	Logger    *log.Logger
+	LogPrefix string
+}
+
+// NewRestoreOptions returns a new instance of RestoreOptions with defaults.
+func NewRestoreOptions() RestoreOptions {
+	return RestoreOptions{
+		Mode:        0600,
+		Parallelism: DefaultRestoreParallelism,
+	}
+}
+
+// RestoreSnapshot copies a snapshot from the replica client to a file.
+func RestoreSnapshot(ctx context.Context, client ReplicaClient, filename, generation string, index int, mode os.FileMode, uid, gid int) error { + f, err := internal.CreateFile(filename, mode, uid, gid) + if err != nil { + return err + } + defer f.Close() + + rd, err := client.SnapshotReader(ctx, generation, index) + if err != nil { + return err + } + defer rd.Close() + + if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { + return err + } else if err := f.Sync(); err != nil { + return err + } + return f.Close() +} diff --git a/replica_client_test.go b/replica_client_test.go index ec2d8411..65d5d819 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -2,572 +2,582 @@ package litestream_test import ( "context" - "flag" "fmt" - "io/ioutil" - "math/rand" "os" - "path" - "reflect" - "sort" + "path/filepath" "strings" "testing" "time" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" - "github.com/benbjohnson/litestream/s3" - "github.com/benbjohnson/litestream/sftp" + "github.com/benbjohnson/litestream/mock" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} +func TestFindSnapshotForIndex(t *testing.T) { + t.Run("BeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000007d0); err != nil { + t.Fatal(err) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%08x, want %08x", got, want) + } + }) -var ( - // Enables integration tests. - integration = flag.String("integration", "file", "") -) + t.Run("AtIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8); err != nil { + t.Fatal(err) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%08x, want %08x", got, want) + } + }) -// S3 settings -var ( - // Replica client settings - s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") - s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") - s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") - s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") - s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") - s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") - s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") - s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") -) + t.Run("ErrNoSnapshotsBeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots-before-index")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `no snapshots available at or before index 000003e8` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// Google cloud storage settings -var ( - gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") - gcsPath = flag.String("gcs-path", 
os.Getenv("LITESTREAM_GCS_PATH"), "") -) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) + } + }) -// Azure blob storage settings -var ( - absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") - absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") - absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") - absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") -) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// SFTP settings -var ( - sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") - sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") - sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") - sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") - sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") -) + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } -func TestReplicaClient_Generations(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } - // Write snapshots. - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { - t.Fatal(err) + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %#v", err) } + }) +} - // Verify returned generations. 
- if got, err := c.Generations(context.Background()); err != nil { +func TestSnapshotTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "ok")) + if min, max, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { - t.Fatalf("Generations()=%v, want %v", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "no-snapshots")) + if _, _, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) + } + }) - if generations, err := c.Generations(context.Background()); err != nil { - t.Fatal(err) - } else if got, want := len(generations), 0; got != want { - t.Fatalf("len(Generations())=%v, want %v", got, want) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -func TestReplicaClient_Snapshots(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - // Write snapshots. - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil } - // Fetch all snapshots by generation. - itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } - defer itr.Close() + }) +} - // Read all snapshots into a slice so they can be sorted. - a, err := litestream.SliceSnapshotIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 2; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.SnapshotInfoSlice(a)) - - // Verify first snapshot metadata. 
- if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 5; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(1); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify second snapshot metadata. - if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 0xA; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. - if err := itr.Close(); err != nil { +func TestWALTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "ok")) + if min, max, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "no-wal-segments")) + if _, _, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %#v", err) + } + }) - itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - defer itr.Close() - if itr.Next() { - t.Fatal("expected no snapshots") + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegmentIterator", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - itr, err := c.Snapshots(context.Background(), "") - if err == nil { - err = itr.Close() + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil } - if err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WriteSnapshot(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := 
c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { +func TestGenerationTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "ok")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } + }) - if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { + t.Run("SnapshotsOnly", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "snapshots-only")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if err := r.Close(); err != nil { - t.Fatal(err) - } else if got, want := string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "no-snapshots")) + if _, _, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) } }) -} -func TestReplicaClient_SnapshotReader(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegments", func(t *testing.T) { + var snapshotN int + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { + snapshotN++ + return snapshotN == 1 + } + itr.SnapshotFunc = func() litestream.SnapshotInfo { + return litestream.SnapshotInfo{CreatedAt: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)} + } + itr.CloseFunc = func() error { return nil } - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) - if err != nil { - t.Fatal(err) + _, _, err := litestream.GenerationTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } - defer r.Close() + }) +} - if buf, err := ioutil.ReadAll(r); err != nil { 
+func TestFindLatestGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != nil { t.Fatal(err) - } else if got, want := string(buf), "foo"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + } else if got, want := generation, "0000000000000001"; got != want { + t.Fatalf("generation=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "no-generations")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) + } else if got, want := generation, ""; got != want { + t.Fatalf("generation=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") + } - if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -func TestReplicaClient_WALSegments(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { - t.Fatal(err) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil } - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") } - itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } - defer itr.Close() + }) +} - // Read all WAL segment files into a slice so they can be sorted. 
- a, err := litestream.SliceWALSegmentIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 3; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.WALSegmentInfoSlice(a)) - - // Verify first WAL segment metadata. - if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(5); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify first WAL segment metadata. - if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Offset, int64(5); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(2); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify third WAL segment metadata. - if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[2].Index, 3; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[2].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[2].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. 
- if err := itr.Close(); err != nil { +func TestReplicaClientTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if min, max, err := litestream.ReplicaClientTimeBounds(context.Background(), client); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoGeneration", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, nil + } - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) } - defer itr.Close() + }) - if itr.Next() { - t.Fatal("expected no wal files") + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil + } + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } + }) +} - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { +func TestFindMaxSnapshotIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "ok")) + if index, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 0x000007d0; got != want { + t.Fatalf("index=%d, want %d", got, want) } - defer itr.Close() + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "no-snapshots")) - if itr.Next() { - t.Fatal("expected no wal files") + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - itr, err := c.WALSegments(context.Background(), "") - if err == nil { - err = 
itr.Close() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") } - if err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -func TestReplicaClient_WriteWALSegment(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshotIteration", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { - t.Fatal(err) + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil } - if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { - t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if err := r.Close(); err != nil { - t.Fatal(err) - } else if got, want := string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) +} - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) +func TestFindMaxWALIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "ok")) + if index, err := litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) -} -func TestReplicaClient_WALSegmentReader(t *testing.T) { + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "no-wal")) - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { - t.Fatal(err) + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %s", err) } + }) - r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) - if err != nil { - t.Fatal(err) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, 
generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - defer r.Close() - if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if got, want := string(buf), "foobar"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegmentIteration", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil + } - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_DeleteWALSegments(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { +func TestFindMaxIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "ok")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { + } else if got, want := index, 0x00000002; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("NoWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) } + }) - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ - {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, - {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, - }); err != nil { + t.Run("SnapshotLaterThanWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "snapshot-later-than-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-snapshots")) - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); 
!os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) - } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max snapshot index: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -// RunWithReplicaClient executes fn with each replica specified by the -integration flag -func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { - t.Run(name, func(t *testing.T) { - for _, typ := range strings.Split(*integration, ",") { - t.Run(typ, func(t *testing.T) { - c := NewReplicaClient(t, typ) - defer MustDeleteAll(t, c) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return litestream.NewSnapshotInfoSliceIterator([]litestream.SnapshotInfo{{Index: 0x00000001}}), nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } - fn(t, c) - }) + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max wal index: wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -// NewReplicaClient returns a new client for integration testing by type name. -func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { - tb.Helper() - - switch typ { - case file.ReplicaClientType: - return NewFileReplicaClient(tb) - case s3.ReplicaClientType: - return NewS3ReplicaClient(tb) - case gcs.ReplicaClientType: - return NewGCSReplicaClient(tb) - case abs.ReplicaClientType: - return NewABSReplicaClient(tb) - case sftp.ReplicaClientType: - return NewSFTPReplicaClient(tb) - default: - tb.Fatalf("invalid replica client type: %q", typ) - return nil - } -} +func TestRestoreSnapshot(t *testing.T) { t.Skip("TODO") } -// NewFileReplicaClient returns a new client for integration testing. -func NewFileReplicaClient(tb testing.TB) *file.ReplicaClient { - tb.Helper() - return file.NewReplicaClient(tb.TempDir()) -} +func TestRestore(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() -// NewS3ReplicaClient returns a new client for integration testing. 
-func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { - tb.Helper() - - c := s3.NewReplicaClient() - c.AccessKeyID = *s3AccessKeyID - c.SecretAccessKey = *s3SecretAccessKey - c.Region = *s3Region - c.Bucket = *s3Bucket - c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) - c.Endpoint = *s3Endpoint - c.ForcePathStyle = *s3ForcePathStyle - c.SkipVerify = *s3SkipVerify - return c -} + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewGCSReplicaClient returns a new client for integration testing. -func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { - tb.Helper() + t.Run("SnapshotOnly", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "snapshot-only") + tempDir := t.TempDir() - c := gcs.NewReplicaClient() - c.Bucket = *gcsBucket - c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewABSReplicaClient returns a new client for integration testing. -func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { - tb.Helper() + t.Run("DefaultParallelism", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() - c := abs.NewReplicaClient() - c.AccountName = *absAccountName - c.AccountKey = *absAccountKey - c.Bucket = *absBucket - c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + client := litestream.NewFileReplicaClient(testDir) + opt := litestream.NewRestoreOptions() + opt.Parallelism = 0 + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, opt); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewSFTPReplicaClient returns a new client for integration testing. -func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { - tb.Helper() - - c := sftp.NewReplicaClient() - c.Host = *sftpHost - c.User = *sftpUser - c.Password = *sftpPassword - c.KeyPath = *sftpKeyPath - c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + t.Run("ErrPathRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, "", "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `restore path required` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// MustDeleteAll deletes all objects under the client's path. 
-func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { - tb.Helper() + t.Run("ErrGenerationRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %#v", err) + } + }) - generations, err := c.Generations(context.Background()) - if err != nil { - tb.Fatalf("cannot list generations for deletion: %s", err) - } + t.Run("ErrSnapshotIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", -1, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `snapshot index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) - for _, generation := range generations { - if err := c.DeleteGeneration(context.Background(), generation); err != nil { - tb.Fatalf("cannot delete generation: %s", err) + t.Run("ErrTargetIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", 0, -1, litestream.NewRestoreOptions()); err == nil || err.Error() != `target index required` { + t.Fatalf("unexpected error: %#v", err) } - } + }) - switch c := c.(type) { - case *sftp.ReplicaClient: - if err := c.Cleanup(context.Background()); err != nil { - tb.Fatalf("cannot cleanup sftp: %s", err) + t.Run("ErrPathExists", func(t *testing.T) { + filename := filepath.Join(t.TempDir(), "db") + if err := os.WriteFile(filename, []byte("foo"), 0600); err != nil { + t.Fatal(err) } - } + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, filename, "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `cannot restore, output path already exists`) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrPathPermissions", func(t *testing.T) { + dir := t.TempDir() + if err := os.Chmod(dir, 0000); err != nil { + t.Fatal(err) + } + + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "restore", "bad-permissions")) + if err := litestream.Restore(context.Background(), client, filepath.Join(dir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatalf("unexpected error: %#v", err) + } + }) } diff --git a/replica_test.go b/replica_test.go index 1a64cc0d..a0220bbb 100644 --- a/replica_test.go +++ b/replica_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/mock" "github.com/pierrec/lz4/v4" ) @@ -45,9 +44,9 @@ func TestReplica_Sync(t *testing.T) { // Fetch current database position. 
dpos := db.Pos() - c := file.NewReplicaClient(t.TempDir()) + c := litestream.NewFileReplicaClient(t.TempDir()) r := litestream.NewReplica(db, "") - c.Replica, r.Client = r, c + r.Client = c if err := r.Sync(context.Background()); err != nil { t.Fatal(err) @@ -81,7 +80,7 @@ func TestReplica_Snapshot(t *testing.T) { db, sqldb := MustOpenDBs(t) defer MustCloseDBs(t, db, sqldb) - c := file.NewReplicaClient(t.TempDir()) + c := litestream.NewFileReplicaClient(t.TempDir()) r := litestream.NewReplica(db, "") r.Client = c diff --git a/testdata/Makefile b/testdata/Makefile new file mode 100644 index 00000000..b87ebd50 --- /dev/null +++ b/testdata/Makefile @@ -0,0 +1,8 @@ +.PHONY: default +default: + make -C find-latest-generation/ok + make -C generation-time-bounds/ok + make -C generation-time-bounds/snapshots-only + make -C replica-client-time-bounds/ok + make -C snapshot-time-bounds/ok + make -C wal-time-bounds/ok diff --git a/testdata/find-latest-generation/no-generations/.gitignore b/testdata/find-latest-generation/no-generations/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/find-latest-generation/ok/Makefile b/testdata/find-latest-generation/ok/Makefile new file mode 100644 index 00000000..847b844e --- /dev/null +++ b/testdata/find-latest-generation/ok/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary 
patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/generation-time-bounds/ok/Makefile b/testdata/generation-time-bounds/ok/Makefile new file mode 100644 index 00000000..06d50442 --- /dev/null +++ b/testdata/generation-time-bounds/ok/Makefile @@ -0,0 +1,8 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + diff --git 
a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/snapshots-only/Makefile b/testdata/generation-time-bounds/snapshots-only/Makefile new file mode 100644 index 00000000..18b382a8 --- /dev/null +++ b/testdata/generation-time-bounds/snapshots-only/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 
100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore b/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA 
literal 0 HcmV?d00001 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore b/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/max-snapshot-index/ok/Makefile b/testdata/max-snapshot-index/ok/Makefile new file mode 100644 index 00000000..3d808b7d --- /dev/null +++ b/testdata/max-snapshot-index/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT 
binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/replica-client-time-bounds/ok/Makefile b/testdata/replica-client-time-bounds/ok/Makefile new file mode 100644 index 00000000..3d808b7d --- /dev/null +++ b/testdata/replica-client-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git 
a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/restore/bad-permissions/00000000.db b/testdata/restore/bad-permissions/00000000.db new file mode 100644 index 0000000000000000000000000000000000000000..86bbea7e842930b044bdf727ae0ede0f0825834d GIT binary patch literal 4096 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|@t|AVoG{WY9BF;00+HAlr;ljiVtj n8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd71*Aut*O6ovo*=#~a$ literal 0 HcmV?d00001 diff --git a/testdata/restore/bad-permissions/README b/testdata/restore/bad-permissions/README new file mode 100644 index 00000000..9450f45a --- /dev/null +++ b/testdata/restore/bad-permissions/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/00000000 db-wal + cp db generations/0000000000000000/snapshots/00000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/00000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/00000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 00000002.db + rm db* + diff --git a/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/restore/ok/00000002.db b/testdata/restore/ok/00000002.db new file mode 100644 index 0000000000000000000000000000000000000000..cfd2b8d8296d0838ebb5cb0d8ab75f3408377b65 GIT binary patch literal 8192 
zcmeI#p$Y;)5C-6x**(D^W3x`wB8WbKr(C;K_ z&Ma*D+HO|mJ~XyFosV^}DfLN&=4M1BZO8Em$J4ia8tMKmLgShB;w#oG#X=we0SG_< z0uX=z1Rwwb2tWV=5cs)(ac7{)lTdFDO$f4F7kO1!l`qP|f(nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..37e1dcf92fd018a7fb7c339d5b49ad06aaf564ca GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMl}ZKh{^Tmx-k9d&~Iz&dY{!pb(=3 z!)MXfb%#S9-oAG^IM64vBvm0TzbH4cM8O!Si;0QBK|ukij){eVg^3}U5y)iWVGv;m zVBlwB;Q7zM#Gq%QzzZ^n{~iPXJv{~Am>8KjL5$^TXVU@v3L-ZE literal 0 HcmV?d00001 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..c73bf2cb4dd5c84407c33054dd017d797727b4b0 GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^n6CU|ef4^oNE+X)Bp4a~ dqk>xuOh9c67l4`>n3<#NBVhml literal 0 HcmV?d00001 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..64a4899be6ce976d393d9b4e84d31dfe6d8b663d GIT binary patch literal 128 zcmZQk@|8#_*qO_~!0<)Vyq>LzLHCga1A_pN&%k))AM5L=vt3OG-ZDP_6Tj>kP>czL zB^ds5o{{bPy*x&>kQXSx%>Rjj{}2CnAO%$L9~ImH3VmP%Dq?101Y#ye5aI;tVql36 GumAuqQY)1J literal 0 HcmV?d00001 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..2265d0e07bb8a0f6707a5f7ebc567e634c5caaf2 GIT binary patch literal 125 zcmZQk@|8#_*qOz^!0LzLHCga1A_pN&%ku$AL|=^`&6w%ZyBF2x02Wm5|d!~ z&lM)TyiaP|&h@-N0T%wZ4E%rizw>_rG8h^Dqkw|w#S$@h4H60H0$82JD2f9L3NC!$WM*YzWMyVyVq{@vW?}?lCPonA1Zrn!ojYp-05jAjMgRZ+ literal 0 HcmV?d00001 diff --git a/testdata/restore/snapshot-only/00000000.db b/testdata/restore/snapshot-only/00000000.db new file mode 100644 index 0000000000000000000000000000000000000000..86bbea7e842930b044bdf727ae0ede0f0825834d GIT binary patch literal 4096 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|@t|AVoG{WY9BF;00+HAlr;ljiVtj n8UmvsFd71*Aut*OqaiRF0;3@?8UmvsFd71*Aut*O6ovo*=#~a$ literal 0 HcmV?d00001 diff --git a/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/snapshot-time-bounds/ok/Makefile b/testdata/snapshot-time-bounds/ok/Makefile new file mode 100644 index 00000000..0a3ea137 --- /dev/null +++ b/testdata/snapshot-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 
generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000000/snapshots/00000002.snapshot.lz4 + diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..03f56a31dc37dd9cff31f52278ec4332404f8b21 GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMgPAAZPVn+hneTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..03f56a31dc37dd9cff31f52278ec4332404f8b21 GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMgPAAZPVn+hneTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..03f56a31dc37dd9cff31f52278ec4332404f8b21 GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMgPAAZPVn+hneTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git 
a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..03f56a31dc37dd9cff31f52278ec4332404f8b21 GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMgPAAZPVn+hneTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..d8c9ab6f48cdffbc0478d5427fd7419a66680730 GIT binary patch literal 90 zcmZQk@|8#_*y+x|z~BJHOe_rlc_jsMR`0b)Jd literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..16be1892ea8a063fe79d73a40fe5a056b1a2917e GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^m?Q;qR`0bLzLHCga1A_pN&%h`tkh7-oq~yY;U}2p*AzOlgVoV?` z!SJ6m>0Rp^y-(*_d4U4V{GS;3|L}hYQa}a&QNazM&<94KB4#E=AZB6&Ax@w!hV)BW F^#B=KDi;6% literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..46d706b56d935d5e815efe70142c29049fc0ede4 GIT binary patch literal 128 zcmZQk@|8#_*qO_~!0<)Vyq>LzLHCga1A_pN&%h`tkh7-oq~yY;U}2p*AzOlgVoV?` z!SJ6m>0Rp^y-(*_d4U4V{GS;3|L}hYQa}a&QNazM&<94KB4#E=AZB6&Ax@w!hV)BW F^#B=KDi;6% literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..5366ae2a34c707fab0259c7f72bb478ec9640c7b GIT binary patch literal 125 zcmZQk@|8#_*qOz^!0LzLHCga1A_pN&%h)pkh502N~p6XSXk${Z^{dhm;}Rr zu8)`buX`Ei{NM!&u<*ZS;Qzz_o&OV%!N~9*72Np1!py?N$imFb#0bPpj3C4bGF)Rr G<17Hg0x6dO literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..6fdb481a2f958401817afa9464b384ec1ac22c43 GIT binary patch literal 108 zcmZQk@|8#_*cr*dz~BJHOe_rlMI{At){0jNb*@zBVR?4=OfWA{f|dUT1OFfX@BE+m r-vXHoObq`~!G#Z;%&bg|tjsJ-j4aH|OpHLx#0Wy1KeTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file 
mode 100644 index 0000000000000000000000000000000000000000..d8c9ab6f48cdffbc0478d5427fd7419a66680730 GIT binary patch literal 90 zcmZQk@|8#_*y+x|z~BJHOe_rlc_jsMR`0b)Jd literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..16be1892ea8a063fe79d73a40fe5a056b1a2917e GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^m?Q;qR`0bLzLHCga1A_pN&%h)pkh502N~p6XSXk${Z^{dhm;}Rr zu8)`buX`Ei{NM!&u<*ZS;Qzz_o&OV%!N~9*72Np1!py?N$imFb#0bPpj3C4bGF)Rr G<17Hg0x6dO literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..6fdb481a2f958401817afa9464b384ec1ac22c43 GIT binary patch literal 108 zcmZQk@|8#_*cr*dz~BJHOe_rlMI{At){0jNb*@zBVR?4=OfWA{f|dUT1OFfX@BE+m r-vXHoObq`~!G#Z;%&bg|tjsJ-j4aH|OpHLx#0Wy1KeTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..16be1892ea8a063fe79d73a40fe5a056b1a2917e GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^m?Q;qR`0b7=N>QXtUqTnR7%56o^meRFK1*IyK3~qi*odjn=q^k}N zu1@aGI_Ti$>f+MDH`fX+INng?|8f^{AIXK^mgiPZ66b2}V?wEv@->;N-9E1FOq~|S zW=q$Z3%RAc@2mTIa{uaXw6K}qb5*;0R6nRVyCkEHQCVx{UDG-JFkgQBVAN|j(R^Lk ze|3$TU$#kkqJwuO2ZaCv2q1s}0tg_000IagfB*uq3s{{<2Z3I6mF-&9IkUbM=8B;$ z=GO9|bvAQ6BcF`+`M-HVdEw@%^zilO_w1c0A@)@fB*srAb0 literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/ok/00000002.wal b/testdata/wal-downloader/ok/00000002.wal new file mode 100644 index 0000000000000000000000000000000000000000..e8bb5264dc0a84985e9500b9561c1be51238c775 GIT binary patch literal 8272 zcmeI$Eeb+W5QgFND+tP5G^i`EXfn75!F0D^yeqH>VsR-3S7ET&40eOuV$h<&;G7}~ zZs5W@%z@!+=ZW8VQCphfxob>jbX+ehww-xvy~^8z{d88Q>^r|5lX0tg_000IagfB*srAbeTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..d8c9ab6f48cdffbc0478d5427fd7419a66680730 GIT binary patch literal 90 zcmZQk@|8#_*y+x|z~BJHOe_rlc_jsMR`0b)Jd literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..16be1892ea8a063fe79d73a40fe5a056b1a2917e GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^m?Q;qR`0bLzLHCga1A_pN&%h`tkh7-oq~yY;U}2p*AzOlgVoV?` 
z!SJ6m>0Rp^y-(*_d4U4V{GS;3|L}hYQa}a&QNazM&<94KB4#E=AZB6&Ax@w!hV)BW F^#B=KDi;6% literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..5366ae2a34c707fab0259c7f72bb478ec9640c7b GIT binary patch literal 125 zcmZQk@|8#_*qOz^!0LzLHCga1A_pN&%h)pkh502N~p6XSXk${Z^{dhm;}Rr zu8)`buX`Ei{NM!&u<*ZS;Qzz_o&OV%!N~9*72Np1!py?N$imFb#0bPpj3C4bGF)Rr G<17Hg0x6dO literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..6fdb481a2f958401817afa9464b384ec1ac22c43 GIT binary patch literal 108 zcmZQk@|8#_*cr*dz~BJHOe_rlMI{At){0jNb*@zBVR?4=OfWA{f|dUT1OFfX@BE+m r-vXHoObq`~!G#Z;%&bg|tjsJ-j4aH|OpHLx#0Wy1K7=N>QXtUqTnR7%56o^meRFK1*IyK3~qi*odjn=q^k}N zu1@aGI_Ti$>f+MDH`fX+INng?|8f^{AIXK^mgiPZ66b2}V?wEv@->;N-9E1FOq~|S zW=q$Z3%RAc@2mTIa{uaXw6K}qb5*;0R6nRVyCkEHQCVx{UDG-JFkgQBVAN|j(R^Lk ze|3$TU$#kkqJwuO2ZaCv2q1s}0tg_000IagfB*uq3s{{<2Z3I6mF-&9IkUbM=8B;$ z=GO9|bvAQ6BcF`+`M-HVdEw@%^zilO_w1c0A@)@fB*srAbeTui*<0^vNtqRY=P(%1ta$Fb3*kVq$PmPyni9VqsumVhCmgGFf;SL>K}X z_?Z}Z{xdK!=$R<+f(+un$H0G2kAVj${vQ?ieil|>WE1D-XEZEHOv*_uDPeLBa&-)G zRS0o(@^MutQP8N+e8MlnWFW95!eGJSuq~hN0*&+*z%=Rs(83Eq+fYIA1SN9-NmNWG literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..d8c9ab6f48cdffbc0478d5427fd7419a66680730 GIT binary patch literal 90 zcmZQk@|8#_*y+x|z~BJHOe_rlc_jsMR`0b)Jd literal 0 HcmV?d00001 diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..16be1892ea8a063fe79d73a40fe5a056b1a2917e GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^m?Q;qR`0bnZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/wal_downloader.go b/wal_downloader.go new file mode 100644 index 00000000..b87c2454 --- /dev/null +++ b/wal_downloader.go @@ -0,0 +1,335 @@ +package litestream + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/benbjohnson/litestream/internal" + 
"github.com/pierrec/lz4/v4" + "golang.org/x/sync/errgroup" +) + +// WALDownloader represents a parallel downloader of WAL files from a replica client. +// +// It works on a per-index level so WAL files are always downloaded in their +// entirety and are not segmented. WAL files are downloaded from minIndex to +// maxIndex, inclusively, and are written to a path prefix. WAL files are named +// with the prefix and suffixed with the WAL index. It is the responsibility of +// the caller to clean up these WAL files. +// +// The purpose of the parallization is that RTT & WAL apply time can consume +// much of the restore time so it's useful to download multiple WAL files in +// the background to minimize the latency. While some WAL indexes may be +// downloaded out of order, the WALDownloader ensures that Next() always +// returns the WAL files sequentially. +type WALDownloader struct { + ctx context.Context // context used for early close/cancellation + cancel func() + + client ReplicaClient // client to read WAL segments with + generation string // generation to download WAL files from + minIndex int // starting WAL index (inclusive) + maxIndex int // ending WAL index (inclusive) + prefix string // output file prefix + + err error // error occuring during init, propagated to Next() + n int // number of WAL files returned by Next() + + // Concurrency coordination + mu sync.Mutex // used to serialize sending of next WAL index + cond *sync.Cond // used with mu above + g *errgroup.Group // manages worker goroutines for downloading + input chan walDownloadInput // holds ordered WAL indices w/ offsets + output chan walDownloadOutput // always sends next sequential WAL; used by Next() + nextIndex int // tracks next WAL index to send to output channel + + // File info used for downloaded WAL files. + Mode os.FileMode + Uid, Gid int + + // Number of downloads occurring in parallel. + Parallelism int +} + +// NewWALDownloader returns a new instance of WALDownloader. +func NewWALDownloader(client ReplicaClient, prefix string, generation string, minIndex, maxIndex int) *WALDownloader { + d := &WALDownloader{ + client: client, + prefix: prefix, + generation: generation, + minIndex: minIndex, + maxIndex: maxIndex, + + Mode: 0600, + Parallelism: 1, + } + + d.ctx, d.cancel = context.WithCancel(context.Background()) + d.cond = sync.NewCond(&d.mu) + + return d +} + +// Close cancels all downloads and returns any error that has occurred. +func (d *WALDownloader) Close() (err error) { + if d.err != nil { + err = d.err + } + + d.cancel() + + if d.g != nil { + if e := d.g.Wait(); err != nil && e != context.Canceled { + err = e + } + } + return err +} + +// init initializes the downloader on the first invocation only. It generates +// the input channel with all WAL indices & offsets needed, it initializes +// the output channel that Next() waits on, and starts the worker goroutines +// that begin downloading WAL files in the background. +func (d *WALDownloader) init(ctx context.Context) error { + if d.input != nil { + return nil // already initialized + } else if d.minIndex < 0 { + return fmt.Errorf("minimum index required") + } else if d.maxIndex < 0 { + return fmt.Errorf("maximum index required") + } else if d.maxIndex < d.minIndex { + return fmt.Errorf("minimum index cannot be larger than maximum index") + } else if d.Parallelism < 1 { + return fmt.Errorf("parallelism must be at least one") + } + + // Populate input channel with indices & offsets. 
+	if err := d.initInputCh(ctx); err != nil {
+		return err
+	}
+	d.nextIndex = d.minIndex
+
+	// Generate output channel that Next() pulls from.
+	d.output = make(chan walDownloadOutput)
+
+	// Spawn worker goroutines to download WALs.
+	d.g, d.ctx = errgroup.WithContext(d.ctx)
+	for i := 0; i < d.Parallelism; i++ {
+		d.g.Go(func() error { return d.downloader(d.ctx) })
+	}
+
+	return nil
+}
+
+// initInputCh populates the input channel with each WAL index between minIndex
+// and maxIndex. It also includes all offsets needed with the index.
+func (d *WALDownloader) initInputCh(ctx context.Context) error {
+	itr, err := d.client.WALSegments(ctx, d.generation)
+	if err != nil {
+		return fmt.Errorf("wal segments: %w", err)
+	}
+	defer func() { _ = itr.Close() }()
+
+	d.input = make(chan walDownloadInput, d.maxIndex-d.minIndex+1)
+	defer close(d.input)
+
+	index := d.minIndex - 1
+	var offsets []int64
+	for itr.Next() {
+		info := itr.WALSegment()
+
+		// Restrict segments to within our index range.
+		if info.Index < d.minIndex {
+			continue // haven't reached minimum index, skip
+		} else if info.Index > d.maxIndex {
+			break // after max index, stop
+		}
+
+		// Flush index & offsets when index changes.
+		if info.Index != index {
+			if info.Index != index+1 { // must be sequential
+				return &WALNotFoundError{Generation: d.generation, Index: index + 1}
+			}
+
+			if len(offsets) > 0 {
+				d.input <- walDownloadInput{index: index, offsets: offsets}
+				offsets = make([]int64, 0)
+			}
+
+			index = info.Index
+		}
+
+		// Append to the end of the WAL file.
+		offsets = append(offsets, info.Offset)
+	}
+
+	// Ensure we read to the last index.
+	if index != d.maxIndex {
+		return &WALNotFoundError{Generation: d.generation, Index: index + 1}
+	}
+
+	// Flush if we have remaining offsets.
+	if len(offsets) > 0 {
+		d.input <- walDownloadInput{index: index, offsets: offsets}
+	}
+
+	return itr.Close()
+}
+
+// N returns the number of WAL files returned by Next().
+func (d *WALDownloader) N() int { return d.n }
+
+// Next returns the index & local file path of the next downloaded WAL file.
+func (d *WALDownloader) Next(ctx context.Context) (int, string, error) {
+	if d.err != nil {
+		return 0, "", d.err
+	} else if d.err = d.init(ctx); d.err != nil {
+		return 0, "", d.err
+	}
+
+	select {
+	case <-ctx.Done():
+		return 0, "", ctx.Err()
+	case <-d.ctx.Done():
+		return 0, "", d.ctx.Err()
+	case v, ok := <-d.output:
+		if !ok {
+			return 0, "", io.EOF
+		}
+
+		d.n++
+		return v.index, v.path, v.err
+	}
+}
+
+// downloader runs in a separate goroutine and downloads the next input index.
+func (d *WALDownloader) downloader(ctx context.Context) error {
+	for {
+		select {
+		case <-ctx.Done():
+			d.cond.Broadcast()
+			return ctx.Err()
+
+		case input, ok := <-d.input:
+			if !ok {
+				return nil // no more input
+			}
+
+			// Wait until next index equals input index and then send file to
+			// output to ensure sorted order.
+			if err := func() error {
+				walPath, err := d.downloadWAL(ctx, input.index, input.offsets)
+
+				d.mu.Lock()
+				defer d.mu.Unlock()
+
+				// Notify other downloader goroutines when we escape this
+				// anonymous function.
+				defer d.cond.Broadcast()
+
+				// Keep looping until our index matches the next index to send.
+				for d.nextIndex != input.index {
+					if ctxErr := ctx.Err(); ctxErr != nil {
+						return ctxErr
+					}
+					d.cond.Wait()
+				}
+
+				// Still under lock, wait until Next() requests next index.
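+				// d.output is unbuffered, so the send below blocks until Next()
+				// is ready to receive, handing WAL files to the caller one at a
+				// time and in index order.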
+				select {
+				case <-ctx.Done():
+					return ctx.Err()
+
+				case d.output <- walDownloadOutput{
+					index: input.index,
+					path:  walPath,
+					err:   err,
+				}:
+					// At the last index, close the output channel to notify
+					// the Next() method to return io.EOF.
+					if d.nextIndex == d.maxIndex {
+						close(d.output)
+						return nil
+					}
+
+					// Update next expected index now that our send is successful.
+					d.nextIndex++
+				}
+
+				return err
+			}(); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+// downloadWAL sequentially downloads all the segments for a WAL index from the
+// replica client and appends them to a single on-disk file. Returns the name
+// of the on-disk file on success.
+func (d *WALDownloader) downloadWAL(ctx context.Context, index int, offsets []int64) (string, error) {
+	// Open handle to destination WAL path.
+	walPath := fmt.Sprintf("%s-%08x-wal", d.prefix, index)
+	f, err := internal.CreateFile(walPath, d.Mode, d.Uid, d.Gid)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// Open readers for every segment in the WAL file, in order.
+	var written int64
+	for _, offset := range offsets {
+		if err := func() error {
+			// Ensure next offset is our current position in the file.
+			if written != offset {
+				return fmt.Errorf("missing WAL offset: generation=%s index=%08x offset=%08x", d.generation, index, written)
+			}
+
+			rd, err := d.client.WALSegmentReader(ctx, Pos{Generation: d.generation, Index: index, Offset: offset})
+			if err != nil {
+				return fmt.Errorf("read WAL segment: %w", err)
+			}
+			defer rd.Close()
+
+			n, err := io.Copy(f, lz4.NewReader(rd))
+			if err != nil {
+				return fmt.Errorf("copy WAL segment: %w", err)
+			}
+			written += n
+
+			return nil
+		}(); err != nil {
+			return "", err
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return "", err
+	}
+	return walPath, nil
+}
+
+type walDownloadInput struct {
+	index   int
+	offsets []int64
+}
+
+type walDownloadOutput struct {
+	path  string
+	index int
+	err   error
+}
+
+// WALNotFoundError is returned by WALDownloader if a WAL index is not found.
+type WALNotFoundError struct {
+	Generation string
+	Index      int
+}
+
+// Error returns the error string.
+func (e *WALNotFoundError) Error() string {
+	return fmt.Sprintf("wal not found: generation=%s index=%08x", e.Generation, e.Index)
+}
diff --git a/wal_downloader_test.go b/wal_downloader_test.go
new file mode 100644
index 00000000..65d1c8ba
--- /dev/null
+++ b/wal_downloader_test.go
@@ -0,0 +1,534 @@
+package litestream_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/benbjohnson/litestream"
+	"github.com/benbjohnson/litestream/mock"
+)
+
+// TestWALDownloader runs downloader tests against different levels of parallelism.
+func TestWALDownloader(t *testing.T) {
+	for _, parallelism := range []int{1, 8, 1024} {
+		t.Run(fmt.Sprint(parallelism), func(t *testing.T) {
+			testWALDownloader(t, parallelism)
+		})
+	}
+}
+
+func testWALDownloader(t *testing.T, parallelism int) {
+	// Ensure WAL files can be downloaded from a file replica on disk.
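+	// The subtests below all follow the same driver loop. As a rough sketch
+	// (error handling trimmed; "client", "prefix", and the index bounds stand
+	// in for whatever the caller is restoring from):
+	//
+	//	d := litestream.NewWALDownloader(client, prefix, generation, minIndex, maxIndex)
+	//	defer d.Close()
+	//	for {
+	//		index, walPath, err := d.Next(ctx)
+	//		if err == io.EOF {
+	//			break // every WAL in [minIndex, maxIndex] has been written
+	//		} else if err != nil {
+	//			return err
+	//		}
+	//		// apply the WAL at walPath, then remove it; cleanup is the caller's job
+	//	}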
+ t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 3; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + + if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a single WAL index can be downloaded. + t.Run("One", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "one") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded. + t.Run("Slice", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded starting from zero. 
+ t.Run("SliceLeft", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded ending at the last index. + t.Run("SliceRight", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a large, generated set of WAL files can be downloaded in the correct order. + t.Run("Large", func(t *testing.T) { + if testing.Short() { + t.Skip("short mode, skipping") + } + + // Generate WAL files. 
+		const n = 1000
+		tempDir := t.TempDir()
+		for i := 0; i < n; i++ {
+			filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", fmt.Sprintf("%08x", i), "00000000.wal.lz4")
+			if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil {
+				t.Fatal(err)
+			} else if err := os.WriteFile(filename, compressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		client := litestream.NewFileReplicaClient(tempDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, n-1)
+		d.Parallelism = parallelism
+		defer d.Close()
+
+		for i := 0; i < n; i++ {
+			if index, filename, err := d.Next(context.Background()); err != nil {
+				t.Fatal(err)
+			} else if got, want := index, i; got != want {
+				t.Fatalf("index[%d]=%d, want %d", i, got, want)
+			} else if buf, err := os.ReadFile(filename); err != nil {
+				t.Fatal(err)
+			} else if got, want := fmt.Sprint(i), string(buf); got != want {
+				t.Fatalf("file[%d]=%q, want %q", i, got, want)
+			}
+		}
+
+		if _, _, err := d.Next(context.Background()); err != io.EOF {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if err := d.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	// Ensure a non-existent WAL directory returns error.
+	t.Run("ErrEmptyGenerationDir", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "empty-generation-dir")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if got, want := d.N(), 0; got != want {
+			t.Fatalf("N=%d, want %d", got, want)
+		}
+
+		// Reinvoking Next() should return the same error.
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+
+		// Close should return the same error.
+		if err := d.Close(); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure an empty WAL directory returns error.
+	t.Run("EmptyWALDir", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-dir")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if got, want := d.N(), 0; got != want {
+			t.Fatalf("N=%d, want %d", got, want)
+		}
+	})
+
+	// Ensure an empty WAL index directory returns an error.
+	t.Run("EmptyWALIndexDir", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-index-dir")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if got, want := d.N(), 0; got != want {
+			t.Fatalf("N=%d, want %d", got, want)
+		}
+	})
+
+	// Ensure closing downloader before calling Next() does not panic.
+	t.Run("CloseWithoutNext", func(t *testing.T) {
+		client := litestream.NewFileReplicaClient(t.TempDir())
+		d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2)
+		if err := d.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	// Ensure downloader closes successfully if invoked after Next() but before last index.
+	t.Run("CloseEarly", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "ok")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2)
+		defer d.Close()
+
+		if index, filename, err := d.Next(context.Background()); err != nil {
+			t.Fatal(err)
+		} else if got, want := index, 0; got != want {
+			t.Fatalf("index=%d, want %d", got, want)
+		} else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) {
+			t.Fatalf("output file mismatch: %s", filename)
+		}
+
+		if err := d.Close(); err != nil {
+			t.Fatal(err)
+		}
+
+		if _, _, err := d.Next(context.Background()); err == nil {
+			t.Fatal("expected error")
+		}
+	})
+
+	// Ensure downloader without a minimum index returns an error.
+	t.Run("ErrMinIndexRequired", func(t *testing.T) {
+		d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", -1, 2)
+		defer d.Close()
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index required` {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure downloader without a maximum index returns an error.
+	t.Run("ErrMaxIndexRequired", func(t *testing.T) {
+		d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 1, -1)
+		defer d.Close()
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `maximum index required` {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure downloader with invalid min/max indexes returns an error.
+	t.Run("ErrMinIndexTooLarge", func(t *testing.T) {
+		d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 2, 1)
+		defer d.Close()
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index cannot be larger than maximum index` {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure downloader returns error if parallelism field is invalid.
+	t.Run("ErrParallelismRequired", func(t *testing.T) {
+		d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 0, 0)
+		d.Parallelism = -1
+		defer d.Close()
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `parallelism must be at least one` {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure a missing index at the beginning returns an error.
+	t.Run("ErrMissingInitialIndex", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "missing-initial-index")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure a gap in indices returns an error.
+	t.Run("ErrMissingMiddleIndex", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "missing-middle-index")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 1}) {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure a missing index at the end returns an error.
+	t.Run("ErrMissingEndingIndex", func(t *testing.T) {
+		testDir := filepath.Join("testdata", "wal-downloader", "missing-ending-index")
+		tempDir := t.TempDir()
+
+		client := litestream.NewFileReplicaClient(testDir)
+		d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2)
+		defer d.Close()
+
+		var e *litestream.WALNotFoundError
+		if _, _, err := d.Next(context.Background()); !errors.As(err, &e) {
+			t.Fatalf("unexpected error type: %#v", err)
+		} else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 2}) {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure downloader returns an error if WAL segment iterator creation fails.
+	t.Run("ErrWALSegments", func(t *testing.T) {
+		var client mock.ReplicaClient
+		client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
+			return nil, errors.New("marker")
+		}
+
+		d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2)
+		defer d.Close()
+
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `wal segments: marker` {
+			t.Fatalf("unexpected error: %#v", err)
+		}
+	})
+
+	// Ensure downloader returns error if WAL segments have a gap in offsets.
+ t.Run("ErrMissingOffset", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-offset") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `missing WAL offset: generation=0000000000000000 index=00000000 offset=00002050` { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if context is canceled. + t.Run("ErrContextCanceled", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "ok")) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + defer d.Close() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + if _, _, err := d.Next(ctx); err != context.Canceled { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while writing WAL to disk. + t.Run("ErrWriteWAL", func(t *testing.T) { + // Create a subdirectory that is not writable. + tempDir := t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "nowrite"), 0000); err != nil { + t.Fatal(err) + } + + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-write-wal")) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "nowrite", "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while downloading WAL. + t.Run("ErrDownloadWAL", func(t *testing.T) { + fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-download-wal")) + + var client mock.ReplicaClient + client.WALSegmentsFunc = fileClient.WALSegments + client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { + return nil, fmt.Errorf("marker") + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `read WAL segment: marker` { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if reading the segment fails. 
+ t.Run("ErrReadWALSegment", func(t *testing.T) { + fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-read-wal-segment")) + + var client mock.ReplicaClient + client.WALSegmentsFunc = fileClient.WALSegments + client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { + var rc mock.ReadCloser + rc.ReadFunc = func([]byte) (int, error) { return 0, errors.New("marker") } + rc.CloseFunc = func() error { return nil } + return &rc, nil + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `copy WAL segment: marker` { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) +} + +func TestWALNotFoundError(t *testing.T) { + err := &litestream.WALNotFoundError{Generation: "0123456789abcdef", Index: 1000} + if got, want := err.Error(), `wal not found: generation=0123456789abcdef index=000003e8`; got != want { + t.Fatalf("Error()=%q, want %q", got, want) + } +} From f308e0b154e76b1571d63c0f063291009b76ac9a Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 11 Jan 2022 13:05:14 -0700 Subject: [PATCH 12/95] CLI test coverage --- Makefile | 3 +- cmd/litestream/Makefile | 6 + cmd/litestream/databases.go | 25 +- cmd/litestream/databases_test.go | 66 ++++ cmd/litestream/generations.go | 130 +++---- cmd/litestream/generations_test.go | 140 ++++++++ cmd/litestream/main.go | 107 ++++-- cmd/litestream/main_test.go | 16 + cmd/litestream/main_windows.go | 3 +- cmd/litestream/replicate.go | 14 +- cmd/litestream/replicate_test.go | 7 +- cmd/litestream/restore.go | 89 +++-- cmd/litestream/restore_test.go | 330 ++++++++++++++++++ cmd/litestream/snapshots.go | 120 +++---- cmd/litestream/snapshots_test.go | 128 +++++++ cmd/litestream/testdata/Makefile | 13 + .../databases/invalid-config/litestream.yml | 4 + .../testdata/databases/no-config/.gitignore | 0 .../databases/no-databases/litestream.yml | 1 + .../testdata/databases/no-databases/stdout | 1 + .../testdata/databases/ok/litestream.yml | 7 + cmd/litestream/testdata/databases/ok/stdout | 3 + .../database-not-found/litestream.yml | 2 + .../generations/invalid-config/litestream.yml | 4 + .../testdata/generations/no-database/Makefile | 4 + .../generations/no-database/litestream.yml | 4 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../testdata/generations/no-database/stdout | 3 + .../testdata/generations/ok/Makefile | 9 + cmd/litestream/testdata/generations/ok/db | 0 .../testdata/generations/ok/litestream.yml | 4 + .../testdata/generations/ok/replica/db | 0 .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes cmd/litestream/testdata/generations/ok/stdout | 3 + .../generations/replica-name/Makefile | 5 + .../testdata/generations/replica-name/db | 0 .../generations/replica-name/litestream.yml | 7 + .../generations/replica-name/replica0/db | 0 .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 
0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../generations/replica-name/replica1/db | 0 .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../testdata/generations/replica-name/stdout | 2 + .../replica-not-found/litestream.yml | 4 + .../testdata/generations/replica-url/Makefile | 9 + .../generations/replica-url/litestream.yml | 4 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../testdata/generations/replica-url/stdout | 3 + .../restore/database-not-found/litestream.yml | 4 + .../generation-with-no-replica/litestream.yml | 5 + .../testdata/restore/if-db-not-exists-flag/db | Bin 0 -> 8192 bytes .../if-db-not-exists-flag/litestream.yml | 4 + .../restore/if-db-not-exists-flag/stdout | 1 + .../if-replica-exists-flag/litestream.yml | 4 + .../restore/if-replica-exists-flag/stdout | 1 + .../restore/invalid-config/litestream.yml | 4 + .../testdata/restore/latest-replica/Makefile | 6 + .../restore/latest-replica/litestream.yml | 7 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../restore/no-backups/litestream.yml | 4 + .../testdata/restore/no-backups/stderr | 0 .../testdata/restore/no-backups/stdout | 0 .../restore/no-generation/litestream.yml | 4 + .../restore/no-replicas/litestream.yml | 2 + .../restore/no-snapshots/litestream.yml | 4 + .../testdata/restore/ok/00000002.db | Bin 0 -> 8192 bytes cmd/litestream/testdata/restore/ok/README | 36 ++ .../testdata/restore/ok/litestream.yml | 4 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 249 bytes .../wal/00000000/00002050.wal.lz4 | Bin 0 -> 90 bytes .../wal/00000000/00003068.wal.lz4 | Bin 0 -> 94 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 128 bytes .../wal/00000002/00000000.wal.lz4 | Bin 0 -> 125 bytes .../wal/00000002/00001038.wal.lz4 | Bin 0 -> 108 bytes .../testdata/restore/output-path-exists/db | Bin 0 -> 8192 bytes .../restore/output-path-exists/litestream.yml | 4 + .../restore/replica-name/litestream.yml | 7 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../restore/replica-not-found/litestream.yml | 5 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../database-not-found/litestream.yml | 2 + .../snapshots/invalid-config/litestream.yml | 4 + cmd/litestream/testdata/snapshots/ok/Makefile | 6 + .../testdata/snapshots/ok/litestream.yml | 4 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes cmd/litestream/testdata/snapshots/ok/stdout | 4 + .../testdata/snapshots/replica-name/Makefile | 4 + .../snapshots/replica-name/litestream.yml | 7 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../testdata/snapshots/replica-name/stdout | 2 + .../replica-not-found/litestream.yml | 4 + .../testdata/snapshots/replica-url/Makefile | 5 + .../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000001.snapshot.lz4 | Bin 0 -> 93 bytes 
.../snapshots/00000000.snapshot.lz4 | Bin 0 -> 93 bytes .../testdata/snapshots/replica-url/stdout | 4 + .../wal/database-not-found/litestream.yml | 2 + .../wal/invalid-config/litestream.yml | 4 + cmd/litestream/testdata/wal/ok/Makefile | 7 + cmd/litestream/testdata/wal/ok/litestream.yml | 4 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes cmd/litestream/testdata/wal/ok/stdout | 5 + .../testdata/wal/replica-name/Makefile | 6 + .../testdata/wal/replica-name/litestream.yml | 7 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../testdata/wal/replica-name/stdout | 2 + .../wal/replica-not-found/litestream.yml | 4 + .../testdata/wal/replica-url/Makefile | 7 + .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000001.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000001/00000000.wal.lz4 | Bin 0 -> 93 bytes .../wal/00000000/00000000.wal.lz4 | Bin 0 -> 93 bytes .../testdata/wal/replica-url/stdout | 5 + cmd/litestream/version.go | 20 +- cmd/litestream/wal.go | 132 +++---- cmd/litestream/wal_test.go | 128 +++++++ internal/internal.go | 28 ++ internal/internal_test.go | 39 +++ internal/testingutil/testingutil.go | 43 +++ testdata/find-latest-generation/ok/Makefile | 10 +- testdata/generation-time-bounds/ok/Makefile | 10 +- .../snapshots-only/Makefile | 4 +- testdata/max-snapshot-index/ok/Makefile | 8 +- .../replica-client-time-bounds/ok/Makefile | 8 +- testdata/snapshot-time-bounds/ok/Makefile | 6 +- testdata/wal-time-bounds/ok/Makefile | 6 +- 154 files changed, 1617 insertions(+), 310 deletions(-) create mode 100644 cmd/litestream/Makefile create mode 100644 cmd/litestream/databases_test.go create mode 100644 cmd/litestream/generations_test.go create mode 100644 cmd/litestream/restore_test.go create mode 100644 cmd/litestream/snapshots_test.go create mode 100644 cmd/litestream/testdata/Makefile create mode 100644 cmd/litestream/testdata/databases/invalid-config/litestream.yml create mode 100644 cmd/litestream/testdata/databases/no-config/.gitignore create mode 100644 cmd/litestream/testdata/databases/no-databases/litestream.yml create mode 100644 cmd/litestream/testdata/databases/no-databases/stdout create mode 100644 cmd/litestream/testdata/databases/ok/litestream.yml create mode 100644 cmd/litestream/testdata/databases/ok/stdout create mode 100644 cmd/litestream/testdata/generations/database-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/generations/invalid-config/litestream.yml create mode 100644 cmd/litestream/testdata/generations/no-database/Makefile create mode 100644 cmd/litestream/testdata/generations/no-database/litestream.yml create mode 100644 cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/no-database/stdout create mode 100644 cmd/litestream/testdata/generations/ok/Makefile create mode 100644 cmd/litestream/testdata/generations/ok/db create mode 100644 cmd/litestream/testdata/generations/ok/litestream.yml create mode 100644 
cmd/litestream/testdata/generations/ok/replica/db create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/ok/stdout create mode 100644 cmd/litestream/testdata/generations/replica-name/Makefile create mode 100644 cmd/litestream/testdata/generations/replica-name/db create mode 100644 cmd/litestream/testdata/generations/replica-name/litestream.yml create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/db create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/replica1/db create mode 100644 cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-name/stdout create mode 100644 cmd/litestream/testdata/generations/replica-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/generations/replica-url/Makefile create mode 100644 cmd/litestream/testdata/generations/replica-url/litestream.yml create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/generations/replica-url/stdout create mode 100644 cmd/litestream/testdata/restore/database-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml create mode 100644 cmd/litestream/testdata/restore/if-db-not-exists-flag/db create mode 
100644 cmd/litestream/testdata/restore/if-db-not-exists-flag/litestream.yml create mode 100644 cmd/litestream/testdata/restore/if-db-not-exists-flag/stdout create mode 100644 cmd/litestream/testdata/restore/if-replica-exists-flag/litestream.yml create mode 100644 cmd/litestream/testdata/restore/if-replica-exists-flag/stdout create mode 100644 cmd/litestream/testdata/restore/invalid-config/litestream.yml create mode 100644 cmd/litestream/testdata/restore/latest-replica/Makefile create mode 100644 cmd/litestream/testdata/restore/latest-replica/litestream.yml create mode 100644 cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/no-backups/litestream.yml create mode 100644 cmd/litestream/testdata/restore/no-backups/stderr create mode 100644 cmd/litestream/testdata/restore/no-backups/stdout create mode 100644 cmd/litestream/testdata/restore/no-generation/litestream.yml create mode 100644 cmd/litestream/testdata/restore/no-replicas/litestream.yml create mode 100644 cmd/litestream/testdata/restore/no-snapshots/litestream.yml create mode 100644 cmd/litestream/testdata/restore/ok/00000002.db create mode 100644 cmd/litestream/testdata/restore/ok/README create mode 100644 cmd/litestream/testdata/restore/ok/litestream.yml create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00002050.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00003068.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00001038.wal.lz4 create mode 100644 cmd/litestream/testdata/restore/output-path-exists/db create mode 100644 cmd/litestream/testdata/restore/output-path-exists/litestream.yml create mode 100644 cmd/litestream/testdata/restore/replica-name/litestream.yml create mode 100644 cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/restore/replica-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/database-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/snapshots/invalid-config/litestream.yml create mode 100644 cmd/litestream/testdata/snapshots/ok/Makefile create mode 100644 cmd/litestream/testdata/snapshots/ok/litestream.yml create mode 100644 
cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/ok/stdout create mode 100644 cmd/litestream/testdata/snapshots/replica-name/Makefile create mode 100644 cmd/litestream/testdata/snapshots/replica-name/litestream.yml create mode 100644 cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-name/stdout create mode 100644 cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/snapshots/replica-url/Makefile create mode 100644 cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 create mode 100644 cmd/litestream/testdata/snapshots/replica-url/stdout create mode 100644 cmd/litestream/testdata/wal/database-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/wal/invalid-config/litestream.yml create mode 100644 cmd/litestream/testdata/wal/ok/Makefile create mode 100644 cmd/litestream/testdata/wal/ok/litestream.yml create mode 100644 cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/ok/stdout create mode 100644 cmd/litestream/testdata/wal/replica-name/Makefile create mode 100644 cmd/litestream/testdata/wal/replica-name/litestream.yml create mode 100644 cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-name/stdout create mode 100644 cmd/litestream/testdata/wal/replica-not-found/litestream.yml create mode 100644 cmd/litestream/testdata/wal/replica-url/Makefile create mode 100644 cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 create mode 100644 
cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 create mode 100644 cmd/litestream/testdata/wal/replica-url/stdout create mode 100644 cmd/litestream/wal_test.go create mode 100644 internal/testingutil/testingutil.go diff --git a/Makefile b/Makefile index 70d3709a..598eddd2 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,10 @@ .PHONY: default -default: testdata +default: .PHONY: testdata testdata: make -C testdata + make -C cmd/litestream testdata docker: docker build -t litestream . diff --git a/cmd/litestream/Makefile b/cmd/litestream/Makefile new file mode 100644 index 00000000..40738583 --- /dev/null +++ b/cmd/litestream/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + +.PHONY: testdata +testdata: + make -C testdata diff --git a/cmd/litestream/databases.go b/cmd/litestream/databases.go index dd7747c5..a9e99aef 100644 --- a/cmd/litestream/databases.go +++ b/cmd/litestream/databases.go @@ -4,17 +4,30 @@ import ( "context" "flag" "fmt" - "os" + "io" "strings" "text/tabwriter" ) // DatabasesCommand is a command for listing managed databases. type DatabasesCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + configPath string noExpandEnv bool } +// NewDatabasesCommand returns a new instance of DatabasesCommand. +func NewDatabasesCommand(stdin io.Reader, stdout, stderr io.Writer) *DatabasesCommand { + return &DatabasesCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} + // Run executes the command. func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError) @@ -27,16 +40,16 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { } // Load configuration. - if c.configPath == "" { - c.configPath = DefaultConfigPath() - } config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err + } else if len(config.DBs) == 0 { + fmt.Fprintln(c.stdout, "No databases found in config file.") + return nil } // List all databases. - w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "path\treplicas") @@ -62,7 +75,7 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { // Usage prints the help screen to STDOUT. func (c *DatabasesCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The databases command lists all databases in the configuration file. 
Usage: diff --git a/cmd/litestream/databases_test.go b/cmd/litestream/databases_test.go new file mode 100644 index 00000000..9499dc67 --- /dev/null +++ b/cmd/litestream/databases_test.go @@ -0,0 +1,66 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestDatabasesCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "ok") + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("NoDatabases", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "no-databases") + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrConfigNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "no-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}) + if err == nil || !strings.Contains(err.Error(), `config file not found:`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "xyz"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/generations.go b/cmd/litestream/generations.go index e4f9fafd..da740998 100644 --- a/cmd/litestream/generations.go +++ b/cmd/litestream/generations.go @@ -4,93 +4,80 @@ import ( "context" "flag" "fmt" - "log" + "io" "os" "text/tabwriter" "time" "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/internal" ) // GenerationsCommand represents a command to list all generations for a database. type GenerationsCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + configPath string noExpandEnv bool + + replicaName string +} + +// NewGenerationsCommand returns a new instance of GenerationsCommand. +func NewGenerationsCommand(stdin io.Reader, stdout, stderr io.Writer) *GenerationsCommand { + return &GenerationsCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } } // Run executes the command. 
-func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) { +func (c *GenerationsCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError) registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) - replicaName := fs.String("replica", "", "replica name") + fs.StringVar(&c.replicaName, "replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err - } else if fs.NArg() == 0 || fs.Arg(0) == "" { + } else if fs.Arg(0) == "" { return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - dbUpdatedAt := time.Now() - if isURL(fs.Arg(0)) { - if c.configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if c.configPath == "" { - c.configPath = DefaultConfigPath() - } - - // Load configuration. - config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) - if err != nil { - return err - } - - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err + } - // Filter by replica, if specified. - if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } - } + replicas, db, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err + } - // Determine last time database or WAL was updated. - if dbUpdatedAt, err = db.UpdatedAt(); err != nil { + // Determine last time database or WAL was updated. + var dbUpdatedAt time.Time + if db != nil { + if dbUpdatedAt, err = db.UpdatedAt(); err != nil && !os.IsNotExist(err) { return err } } - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas - } - // List each generation. - w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend") + for _, r := range replicas { generations, err := r.Client.Generations(ctx) if err != nil { - log.Printf("%s: cannot list generations: %s", r.Name(), err) + fmt.Fprintf(c.stderr, "%s: cannot list generations: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } @@ -98,26 +85,35 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) for _, generation := range generations { createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client, generation) if err != nil { - log.Printf("%s: cannot determine generation time bounds: %s", r.Name(), err) + fmt.Fprintf(c.stderr, "%s: cannot determine generation time bounds: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } + // Calculate lag from database mod time to the replica mod time. 
+ // This is ignored if the database mod time is unavailable such as + // when specifying the replica URL or if the database file is missing. + lag := "-" + if !dbUpdatedAt.IsZero() { + lag = internal.TruncateDuration(dbUpdatedAt.Sub(updatedAt)).String() + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", r.Name(), generation, - truncateDuration(dbUpdatedAt.Sub(updatedAt)).String(), + lag, createdAt.Format(time.RFC3339), updatedAt.Format(time.RFC3339), ) } } - return nil + return ret } // Usage prints the help message to STDOUT. func (c *GenerationsCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The generations command lists all generations for a database or replica. It also lists stats about their lag behind the primary database and the time range they cover. @@ -144,29 +140,3 @@ Arguments: DefaultConfigPath(), ) } - -func truncateDuration(d time.Duration) time.Duration { - if d < 0 { - if d < -10*time.Second { - return d.Truncate(time.Second) - } else if d < -time.Second { - return d.Truncate(time.Second / 10) - } else if d < -time.Millisecond { - return d.Truncate(time.Millisecond) - } else if d < -time.Microsecond { - return d.Truncate(time.Microsecond) - } - return d - } - - if d > 10*time.Second { - return d.Truncate(time.Second) - } else if d > time.Second { - return d.Truncate(time.Second / 10) - } else if d > time.Millisecond { - return d.Truncate(time.Millisecond) - } else if d > time.Microsecond { - return d.Truncate(time.Microsecond) - } - return d -} diff --git a/cmd/litestream/generations_test.go b/cmd/litestream/generations_test.go new file mode 100644 index 00000000..097bd35b --- /dev/null +++ b/cmd/litestream/generations_test.go @@ -0,0 +1,140 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestGenerationsCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "generations", "replica-url") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, 
"stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("NoDatabase", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "no-database") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "generations", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + 
+		}
+	})
+
+	t.Run("Usage", func(t *testing.T) {
+		m, _, _, _ := newMain()
+		if err := m.Run(context.Background(), []string{"generations", "-h"}); err != flag.ErrHelp {
+			t.Fatalf("unexpected error: %s", err)
+		}
+	})
+}
diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go
index 7f6f101a..176ec991 100644
--- a/cmd/litestream/main.go
+++ b/cmd/litestream/main.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"log"
 	"net/url"
@@ -32,14 +33,14 @@ var (
 	Version = "(development build)"
 )
 
-// errStop is a terminal error for indicating program should quit.
-var errStop = errors.New("stop")
+// errExit is a terminal error for indicating program should quit.
+var errExit = errors.New("exit")
 
 func main() {
 	log.SetFlags(0)
 
-	m := NewMain()
-	if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop {
+	m := NewMain(os.Stdin, os.Stdout, os.Stderr)
+	if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errExit {
 		os.Exit(1)
 	} else if err != nil {
 		log.Println(err)
@@ -48,11 +49,19 @@ func main() {
 }
 
 // Main represents the main program execution.
-type Main struct{}
+type Main struct {
+	stdin  io.Reader
+	stdout io.Writer
+	stderr io.Writer
+}
 
 // NewMain returns a new instance of Main.
-func NewMain() *Main {
-	return &Main{}
+func NewMain(stdin io.Reader, stdout, stderr io.Writer) *Main {
+	return &Main{
+		stdin:  stdin,
+		stdout: stdout,
+		stderr: stderr,
+	}
 }
 
 // Run executes the program.
@@ -75,11 +84,11 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
 
 	switch cmd {
 	case "databases":
-		return (&DatabasesCommand{}).Run(ctx, args)
+		return NewDatabasesCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	case "generations":
-		return (&GenerationsCommand{}).Run(ctx, args)
+		return NewGenerationsCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	case "replicate":
-		c := NewReplicateCommand()
+		c := NewReplicateCommand(m.stdin, m.stdout, m.stderr)
 		if err := c.ParseFlags(ctx, args); err != nil {
 			return err
 		}
@@ -96,21 +105,21 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
 
 		// Wait for signal to stop program.
 		select {
 		case <-ctx.Done():
-			fmt.Println("context done, litestream shutting down")
+			fmt.Fprintln(m.stdout, "context done, litestream shutting down")
 		case err = <-c.execCh:
 			cancel()
-			fmt.Println("subprocess exited, litestream shutting down")
+			fmt.Fprintln(m.stdout, "subprocess exited, litestream shutting down")
 		case sig := <-signalCh:
 			cancel()
-			fmt.Println("signal received, litestream shutting down")
+			fmt.Fprintln(m.stdout, "signal received, litestream shutting down")
 
 			if c.cmd != nil {
-				fmt.Println("sending signal to exec process")
+				fmt.Fprintln(m.stdout, "sending signal to exec process")
 				if err := c.cmd.Process.Signal(sig); err != nil {
 					return fmt.Errorf("cannot signal exec process: %w", err)
 				}
 
-				fmt.Println("waiting for exec process to close")
+				fmt.Fprintln(m.stdout, "waiting for exec process to close")
 				if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") {
 					return fmt.Errorf("cannot wait for exec process: %w", err)
 				}
@@ -121,17 +130,17 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
 		if e := c.Close(); e != nil && err == nil {
 			err = e
 		}
-		fmt.Println("litestream shut down")
+		fmt.Fprintln(m.stdout, "litestream shut down")
 		return err
 
 	case "restore":
-		return NewRestoreCommand().Run(ctx, args)
+		return NewRestoreCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	case "snapshots":
-		return (&SnapshotsCommand{}).Run(ctx, args)
+		return NewSnapshotsCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	case "version":
-		return (&VersionCommand{}).Run(ctx, args)
+		return NewVersionCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	case "wal":
-		return (&WALCommand{}).Run(ctx, args)
+		return NewWALCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args)
 	default:
 		if cmd == "" || cmd == "help" || strings.HasPrefix(cmd, "-") {
 			m.Usage()
@@ -143,7 +152,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
 
 // Usage prints the help screen to STDOUT.
 func (m *Main) Usage() {
-	fmt.Println(`
+	fmt.Fprintln(m.stdout, `
 litestream is a tool for replicating SQLite databases.
 
 Usage:
@@ -210,9 +219,15 @@ func (c *Config) DBConfig(path string) *DBConfig {
 
 // ReadConfigFile unmarshals config from filename. Expands path if needed.
 // If expandEnv is true then environment variables are expanded in the config.
+// If filename is blank then the default config path is used.
 func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
 	config := DefaultConfig()
+
+	useDefaultPath := filename == ""
+	if useDefaultPath {
+		filename = DefaultConfigPath()
+	}
+
 	// Expand filename, if necessary.
 	filename, err = expand(filename)
 	if err != nil {
@@ -220,8 +235,12 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
 	}
 
 	// Read configuration.
+	// Do not return an error if using default path and file is missing.
 	buf, err := ioutil.ReadFile(filename)
 	if os.IsNotExist(err) {
+		if useDefaultPath {
+			return config, nil
+		}
 		return config, fmt.Errorf("config file not found: %s", filename)
 	} else if err != nil {
 		return config, err
@@ -354,7 +373,7 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re
 	}
 
 	// Build and set client on replica.
-	switch c.ReplicaType() {
+	switch typ := c.ReplicaType(); typ {
 	case "file":
 		if r.Client, err = newFileReplicaClientFromConfig(c, r); err != nil {
 			return nil, err
@@ -376,7 +395,7 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re
 			return nil, err
 		}
 	default:
-		return nil, fmt.Errorf("unknown replica type in config: %q", c.Type)
+		return nil, fmt.Errorf("unknown replica type in config: %q", typ)
 	}
 
 	return r, nil
@@ -714,3 +733,45 @@ func (v *indexVar) Set(s string) error {
 	*v = indexVar(i)
 	return nil
 }
+
+// loadReplicas returns a list of replicas to use based on CLI flags. Filters
+// by replicaName, if not blank. The DB is returned if pathOrURL is not a replica URL.
+func loadReplicas(ctx context.Context, config Config, pathOrURL, replicaName string) ([]*litestream.Replica, *litestream.DB, error) {
+	// Build a replica based on URL, if specified.
+	if isURL(pathOrURL) {
+		r, err := NewReplicaFromConfig(&ReplicaConfig{
+			URL:             pathOrURL,
+			AccessKeyID:     config.AccessKeyID,
+			SecretAccessKey: config.SecretAccessKey,
+		}, nil)
+		if err != nil {
+			return nil, nil, err
+		}
+		return []*litestream.Replica{r}, nil, nil
+	}
+
+	// Otherwise use replicas from the database configuration file.
+	path, err := expand(pathOrURL)
+	if err != nil {
+		return nil, nil, err
+	}
+	dbc := config.DBConfig(path)
+	if dbc == nil {
+		return nil, nil, fmt.Errorf("database not found in config: %s", path)
+	}
+	db, err := NewDBFromConfig(dbc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Filter by replica, if specified.
+	if replicaName != "" {
+		r := db.Replica(replicaName)
+		if r == nil {
+			return nil, nil, fmt.Errorf("replica %q not found for database %q", replicaName, db.Path())
+		}
+		return []*litestream.Replica{r}, db, nil
+	}
+
+	return db.Replicas, db, nil
+}
diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go
index 38860950..f3e9fb1b 100644
--- a/cmd/litestream/main_test.go
+++ b/cmd/litestream/main_test.go
@@ -1,6 +1,8 @@
 package main_test
 
 import (
+	"bytes"
+	"io"
 	"io/ioutil"
 	"log"
 	"os"
@@ -180,3 +182,17 @@ func TestNewGCSReplicaFromConfig(t *testing.T) {
 		t.Fatalf("Path=%s, want %s", got, want)
 	}
 }
+
+// newMain returns a new instance of Main and associated buffers.
+func newMain() (m *main.Main, stdin, stdout, stderr *bytes.Buffer) {
+	stdin, stdout, stderr = &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}
+
+	// Split stdout/stderr to terminal if verbose flag set.
+	out, err := io.Writer(stdout), io.Writer(stderr)
+	if testing.Verbose() {
+		out = io.MultiWriter(out, os.Stdout)
+		err = io.MultiWriter(err, os.Stderr)
+	}
+
+	return main.NewMain(stdin, out, err), stdin, stdout, stderr
+}
diff --git a/cmd/litestream/main_windows.go b/cmd/litestream/main_windows.go
index 512ab263..d437c1bc 100644
--- a/cmd/litestream/main_windows.go
+++ b/cmd/litestream/main_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package main
@@ -41,7 +42,7 @@ func runWindowsService(ctx context.Context) error {
 	log.Print("Litestream service starting")
 
 	if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil {
-		return errStop
+		return errExit
 	}
 
 	log.Print("Litestream service stopped")
diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go
index 3da238fd..5d32db74 100644
--- a/cmd/litestream/replicate.go
+++ b/cmd/litestream/replicate.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"io"
 	"log"
 	"net"
 	"net/http"
@@ -22,6 +23,10 @@ import (
 
 // ReplicateCommand represents a command that continuously replicates SQLite databases.
type ReplicateCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + configPath string noExpandEnv bool @@ -34,8 +39,13 @@ type ReplicateCommand struct { DBs []*litestream.DB } -func NewReplicateCommand() *ReplicateCommand { +// NewReplicateCommand returns a new instance of ReplicateCommand. +func NewReplicateCommand(stdin io.Reader, stdout, stderr io.Writer) *ReplicateCommand { return &ReplicateCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + execCh: make(chan error), } } @@ -181,7 +191,7 @@ func (c *ReplicateCommand) Close() (err error) { // Usage prints the help screen to STDOUT. func (c *ReplicateCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The replicate command starts a server to monitor & replicate databases. You can specify your database & replicas in a configuration file or you can replicate a single database file by specifying its path and its replicas in the diff --git a/cmd/litestream/replicate_test.go b/cmd/litestream/replicate_test.go index 47085808..7d85b04e 100644 --- a/cmd/litestream/replicate_test.go +++ b/cmd/litestream/replicate_test.go @@ -13,7 +13,6 @@ import ( "testing" "time" - main "github.com/benbjohnson/litestream/cmd/litestream" "golang.org/x/sync/errgroup" ) @@ -82,7 +81,8 @@ dbs: // Replicate database unless the context is canceled. g.Go(func() error { - return main.NewMain().Run(mainctx, []string{"replicate", "-config", configPath}) + m, _, _, _ := newMain() + return m.Run(mainctx, []string{"replicate", "-config", configPath}) }) if err := g.Wait(); err != nil { @@ -94,7 +94,8 @@ dbs: chksum0 := mustChecksum(t, dbPath) // Restore to another path. - if err := main.NewMain().Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) { t.Fatal(err) } diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 9e3dca1b..1a0f5fd2 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -5,6 +5,7 @@ import ( "errors" "flag" "fmt" + "io" "log" "os" "path/filepath" @@ -15,6 +16,10 @@ import ( // RestoreCommand represents a command to restore a database from a backup. type RestoreCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + snapshotIndex int // index of snapshot to start from // CLI options @@ -29,8 +34,13 @@ type RestoreCommand struct { opt litestream.RestoreOptions } -func NewRestoreCommand() *RestoreCommand { +// NewRestoreCommand returns a new instance of RestoreCommand. +func NewRestoreCommand(stdin io.Reader, stdout, stderr io.Writer) *RestoreCommand { return &RestoreCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + targetIndex: -1, opt: litestream.NewRestoreOptions(), } @@ -55,31 +65,39 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - arg := fs.Arg(0) + pathOrURL := fs.Arg(0) // Ensure a generation is specified if target index is specified. if c.targetIndex != -1 && c.generation == "" { - return fmt.Errorf("must specify -generation when using -index flag") + return fmt.Errorf("must specify -generation flag when using -index flag") } // Default to original database path if output path not specified. 
- if !isURL(arg) && c.outputPath == "" { - c.outputPath = arg + if !isURL(pathOrURL) && c.outputPath == "" { + c.outputPath = pathOrURL } // Exit successfully if the output file already exists and flag is set. - if _, err := os.Stat(c.outputPath); !os.IsNotExist(err) && c.ifDBNotExists { - fmt.Println("database already exists, skipping") - return nil + if _, err := os.Stat(c.outputPath); os.IsNotExist(err) { + // file doesn't exist, continue + } else if err != nil { + return err + } else if err == nil { + if c.ifDBNotExists { + fmt.Fprintln(c.stdout, "database already exists, skipping") + return nil + } + return fmt.Errorf("output file already exists: %s", c.outputPath) } - // Create parent directory if it doesn't already exist. - if err := os.MkdirAll(filepath.Dir(c.outputPath), 0700); err != nil { - return fmt.Errorf("cannot create parent directory: %w", err) + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err } // Build replica from either a URL or config. - r, err := c.loadReplica(ctx, arg) + r, err := c.loadReplica(ctx, config, pathOrURL) if err != nil { return err } @@ -90,7 +108,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { // Return an error if no matching targets found. // If optional flag set, return success. Useful for automated recovery. if c.ifReplicaExists { - fmt.Println("no matching backups found") + fmt.Fprintln(c.stdout, "no matching backups found, skipping") return nil } return fmt.Errorf("no matching backups found") @@ -112,47 +130,42 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { return fmt.Errorf("cannot find snapshot index: %w", err) } - c.opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds) + // Create parent directory if it doesn't already exist. + if err := os.MkdirAll(filepath.Dir(c.outputPath), 0700); err != nil { + return fmt.Errorf("cannot create parent directory: %w", err) + } + + c.opt.Logger = log.New(c.stdout, "", log.LstdFlags|log.Lmicroseconds) return litestream.Restore(ctx, r.Client, c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt) } -func (c *RestoreCommand) loadReplica(ctx context.Context, arg string) (*litestream.Replica, error) { +func (c *RestoreCommand) loadReplica(ctx context.Context, config Config, arg string) (*litestream.Replica, error) { if isURL(arg) { - return c.loadReplicaFromURL(ctx, arg) + return c.loadReplicaFromURL(ctx, config, arg) } - return c.loadReplicaFromConfig(ctx, arg) + return c.loadReplicaFromConfig(ctx, config, arg) } // loadReplicaFromURL creates a replica & updates the restore options from a replica URL. 
-func (c *RestoreCommand) loadReplicaFromURL(ctx context.Context, replicaURL string) (*litestream.Replica, error) { - if c.configPath != "" { - return nil, fmt.Errorf("cannot specify a replica URL and the -config flag") - } else if c.replicaName != "" { - return nil, fmt.Errorf("cannot specify a replica URL and the -replica flag") +func (c *RestoreCommand) loadReplicaFromURL(ctx context.Context, config Config, replicaURL string) (*litestream.Replica, error) { + if c.replicaName != "" { + return nil, fmt.Errorf("cannot specify both the replica URL and the -replica flag") } else if c.outputPath == "" { - return nil, fmt.Errorf("output path required") + return nil, fmt.Errorf("output path required when using a replica URL") } syncInterval := litestream.DefaultSyncInterval return NewReplicaFromConfig(&ReplicaConfig{ - URL: replicaURL, - SyncInterval: &syncInterval, + URL: replicaURL, + AccessKeyID: config.AccessKeyID, + SecretAccessKey: config.SecretAccessKey, + SyncInterval: &syncInterval, }, nil) } // loadReplicaFromConfig returns replicas based on the specific config path. -func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, dbPath string) (*litestream.Replica, error) { - if c.configPath == "" { - c.configPath = DefaultConfigPath() - } - - // Load configuration. - config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) - if err != nil { - return nil, err - } - +func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, config Config, dbPath string) (_ *litestream.Replica, err error) { // Lookup database from configuration file by path. if dbPath, err = expand(dbPath); err != nil { return nil, err @@ -184,7 +197,7 @@ func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, dbPath strin // A replica must be specified when restoring a specific generation with multiple replicas. if c.generation != "" { - return nil, fmt.Errorf("must specify -replica when restoring from a specific generation") + return nil, fmt.Errorf("must specify -replica flag when restoring from a specific generation") } // Determine latest replica to restore from. @@ -197,7 +210,7 @@ func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, dbPath strin // Usage prints the help screen to STDOUT. func (c *RestoreCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The restore command recovers a database from a previous snapshot and WAL. Usage: diff --git a/cmd/litestream/restore_test.go b/cmd/litestream/restore_test.go new file mode 100644 index 00000000..4d0770cf --- /dev/null +++ b/cmd/litestream/restore_test.go @@ -0,0 +1,330 @@ +package main_test + +import ( + "context" + "flag" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestRestoreCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + // STDOUT has timing info so we need to grep per line. 
+ lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000000/00000000 to ` + filepath.Join(tempDir, "db.tmp"), + `applied wal 0000000000000000/00000000 elapsed=`, + `applied wal 0000000000000000/00000001 elapsed=`, + `applied wal 0000000000000000/00000002 elapsed=`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + // STDOUT has timing info so we need to grep per line. + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000001/00000001 to ` + filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "restore", "replica-url") + tempDir := t.TempDir() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(tempDir, "db"), replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000000/00000000 to ` + filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("LatestReplica", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "latest-replica") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000001/00000000 to ` + filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("IfDBNotExistsFlag", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "if-db-not-exists-flag") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := 
newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-db-not-exists", filepath.Join(testDir, "db")}) + if err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("IfReplicaExists", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "if-replica-exists-flag") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-replica-exists", filepath.Join(testDir, "db")}) + if err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrNoBackups", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-backups") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `no matching backups found` { + t.Fatalf("unexpected error: %s", err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } else if got, want := stderr.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stderr"))); got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + }) + + t.Run("ErrNoGeneration", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-generation") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `no matching backups found` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrOutputPathExists", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "output-path-exists") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `output file already exists: `+filepath.Join(testDir, "db") { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not 
defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrIndexFlagOnly", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-index", "0", "/var/lib/db"}) + if err == nil || err.Error() != `must specify -generation flag when using -index flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrMkdir", func(t *testing.T) { + tempDir := t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "noperm"), 0000); err != nil { + t.Fatal(err) + } + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(tempDir, "noperm", "subdir", "db"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoOutputPathWithReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "file://path/to/replica"}) + if err == nil || err.Error() != `output path required when using a replica URL` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNameWithReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-replica", "replica0", "file://path/to/replica"}) + if err == nil || err.Error() != `cannot specify both the replica URL and the -replica flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-o", "/tmp/db", "xyz://xyz"}) + if err == nil || err.Error() != `unknown replica type in config: "xyz"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoReplicas", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-replicas") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `database has no replicas: `+filepath.Join(testingutil.Getwd(t), testDir, "db") { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", 
func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrGenerationWithNoReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "generation-with-no-replica") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-generation", "0000000000000000", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `must specify -replica flag when restoring from a specific generation` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoSnapshotsAvailable", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-snapshots") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-generation", "0000000000000000", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `cannot determine latest index in generation "0000000000000000": no snapshots available` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"restore", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index d8f84fa7..c274f3e9 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -4,8 +4,8 @@ import ( "context" "flag" "fmt" + "io" "log" - "os" "sort" "text/tabwriter" "time" @@ -15,99 +15,89 @@ import ( // SnapshotsCommand represents a command to list snapshots for a command. type SnapshotsCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + configPath string noExpandEnv bool + + replicaName string +} + +// NewSnapshotsCommand returns a new instance of SnapshotsCommand. +func NewSnapshotsCommand(stdin io.Reader, stdout, stderr io.Writer) *SnapshotsCommand { + return &SnapshotsCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } } // Run executes the command. 
-func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { +func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError) registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) - replicaName := fs.String("replica", "", "replica name") + fs.StringVar(&c.replicaName, "replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err } else if fs.NArg() == 0 || fs.Arg(0) == "" { - return fmt.Errorf("database path required") + return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if c.configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if c.configPath == "" { - c.configPath = DefaultConfigPath() - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err + } - // Load configuration. - config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) - if err != nil { - return err - } + // Determine list of replicas to pull snapshots from. + replicas, _, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err + } - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err + // Build list of snapshot metadata with associated replica. + var infos []replicaSnapshotInfo + for _, r := range replicas { + a, err := r.Snapshots(ctx) + if err != nil { + log.Printf("cannot determine snapshots: %s", err) + ret = errExit // signal error return without printing message + continue } - - // Filter by replica, if specified. - if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } + for i := range a { + infos = append(infos, replicaSnapshotInfo{SnapshotInfo: a[i], replicaName: r.Name()}) } } - // Find snapshots by db or replica. - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas - } + // Sort snapshots by creation time from newest to oldest. + sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) // List all snapshots. - w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated") - for _, r := range replicas { - infos, err := r.Snapshots(ctx) - if err != nil { - log.Printf("cannot determine snapshots: %s", err) - continue - } - // Sort snapshots by creation time from newest to oldest. 
- sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) - for _, info := range infos { - fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n", - r.Name(), - info.Generation, - info.Index, - info.Size, - info.CreatedAt.Format(time.RFC3339), - ) - } + for _, info := range infos { + fmt.Fprintf(w, "%s\t%s\t%08x\t%d\t%s\n", + info.replicaName, + info.Generation, + info.Index, + info.Size, + info.CreatedAt.Format(time.RFC3339), + ) } - return nil + return ret } // Usage prints the help screen to STDOUT. func (c *SnapshotsCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The snapshots command lists all snapshots available for a database or replica. Usage: @@ -143,3 +133,9 @@ Examples: DefaultConfigPath(), ) } + +// replicaSnapshotInfo represents snapshot metadata with associated replica name. +type replicaSnapshotInfo struct { + litestream.SnapshotInfo + replicaName string +} diff --git a/cmd/litestream/snapshots_test.go b/cmd/litestream/snapshots_test.go new file mode 100644 index 00000000..3dc92884 --- /dev/null +++ b/cmd/litestream/snapshots_test.go @@ -0,0 +1,128 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestSnapshotsCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "snapshots", "replica-url") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", 
err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "snapshots", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/testdata/Makefile b/cmd/litestream/testdata/Makefile new file mode 100644 index 00000000..14e20850 --- /dev/null +++ b/cmd/litestream/testdata/Makefile @@ -0,0 +1,13 @@ +.PHONY: default +default: + make -C generations/ok + make -C generations/no-database + make -C generations/replica-name + make -C generations/replica-url + make -C restore/latest-replica + make -C snapshots/ok + make -C snapshots/replica-name + make -C snapshots/replica-url + make -C wal/ok + make -C wal/replica-name + make -C wal/replica-url diff --git a/cmd/litestream/testdata/databases/invalid-config/litestream.yml b/cmd/litestream/testdata/databases/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/databases/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/databases/no-config/.gitignore 
b/cmd/litestream/testdata/databases/no-config/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/databases/no-databases/litestream.yml b/cmd/litestream/testdata/databases/no-databases/litestream.yml new file mode 100644 index 00000000..f6fff35c --- /dev/null +++ b/cmd/litestream/testdata/databases/no-databases/litestream.yml @@ -0,0 +1 @@ +dbs: diff --git a/cmd/litestream/testdata/databases/no-databases/stdout b/cmd/litestream/testdata/databases/no-databases/stdout new file mode 100644 index 00000000..9f9c245e --- /dev/null +++ b/cmd/litestream/testdata/databases/no-databases/stdout @@ -0,0 +1 @@ +No databases found in config file. diff --git a/cmd/litestream/testdata/databases/ok/litestream.yml b/cmd/litestream/testdata/databases/ok/litestream.yml new file mode 100644 index 00000000..14788e4d --- /dev/null +++ b/cmd/litestream/testdata/databases/ok/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: /var/lib/db + replicas: + - path: /var/lib/replica + - url: s3://mybkt/db + + - path: /my/other/db \ No newline at end of file diff --git a/cmd/litestream/testdata/databases/ok/stdout b/cmd/litestream/testdata/databases/ok/stdout new file mode 100644 index 00000000..58fcd650 --- /dev/null +++ b/cmd/litestream/testdata/databases/ok/stdout @@ -0,0 +1,3 @@ +path replicas +/var/lib/db file,s3 +/my/other/db diff --git a/cmd/litestream/testdata/generations/database-not-found/litestream.yml b/cmd/litestream/testdata/generations/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/generations/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/generations/invalid-config/litestream.yml b/cmd/litestream/testdata/generations/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/generations/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/generations/no-database/Makefile b/cmd/litestream/testdata/generations/no-database/Makefile new file mode 100644 index 00000000..793e5cd3 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/Makefile @@ -0,0 +1,4 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/no-database/litestream.yml b/cmd/litestream/testdata/generations/no-database/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git 
a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/no-database/stdout b/cmd/litestream/testdata/generations/no-database/stdout new file mode 100644 index 00000000..774650c6 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 - 2000-01-01T00:00:00Z 2000-01-01T00:00:00Z +file 0000000000000001 - 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/generations/ok/Makefile b/cmd/litestream/testdata/generations/ok/Makefile new file mode 100644 index 00000000..51f53943 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/Makefile @@ -0,0 +1,9 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001030000 db + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/db b/cmd/litestream/testdata/generations/ok/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/ok/litestream.yml b/cmd/litestream/testdata/generations/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/ok/replica/db b/cmd/litestream/testdata/generations/ok/replica/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git 
a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/ok/stdout b/cmd/litestream/testdata/generations/ok/stdout new file mode 100644 index 00000000..23d77954 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 0s 2000-01-01T00:00:00Z 2000-01-03T00:00:00Z +file 0000000000000001 48h0m0s 2000-01-01T00:00:00Z 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/generations/replica-name/Makefile b/cmd/litestream/testdata/generations/replica-name/Makefile new file mode 100644 index 00000000..f6a5eaed --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001030000 db + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/db b/cmd/litestream/testdata/generations/replica-name/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/litestream.yml b/cmd/litestream/testdata/generations/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git 
a/cmd/litestream/testdata/generations/replica-name/replica0/db b/cmd/litestream/testdata/generations/replica-name/replica0/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/replica1/db b/cmd/litestream/testdata/generations/replica-name/replica1/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-name/stdout 
b/cmd/litestream/testdata/generations/replica-name/stdout new file mode 100644 index 00000000..111a6b23 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/stdout @@ -0,0 +1,2 @@ +name generation lag start end +replica1 0000000000000001 24h0m0s 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/generations/replica-not-found/litestream.yml b/cmd/litestream/testdata/generations/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/generations/replica-url/Makefile b/cmd/litestream/testdata/generations/replica-url/Makefile new file mode 100644 index 00000000..3125ed28 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/Makefile @@ -0,0 +1,9 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + diff --git a/cmd/litestream/testdata/generations/replica-url/litestream.yml b/cmd/litestream/testdata/generations/replica-url/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git 
a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/generations/replica-url/stdout b/cmd/litestream/testdata/generations/replica-url/stdout new file mode 100644 index 00000000..e099c745 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 - 2000-01-01T00:00:00Z 2000-01-03T00:00:00Z +file 0000000000000001 - 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/restore/database-not-found/litestream.yml b/cmd/litestream/testdata/restore/database-not-found/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/database-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml b/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml new file mode 100644 index 00000000..8696dbe0 --- /dev/null +++ b/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica0 + - path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/restore/if-db-not-exists-flag/db b/cmd/litestream/testdata/restore/if-db-not-exists-flag/db new file mode 100644 index 0000000000000000000000000000000000000000..cfd2b8d8296d0838ebb5cb0d8ab75f3408377b65 GIT binary patch literal 8192 zcmeI#p$Y;)5C-6x**(D^W3x`wB8WbKr(C;K_ z&Ma*D+HO|mJ~XyFosV^}DfLN&=4M1BZO8Em$J4ia8tMKmLgShB;w#oG#X=we0SG_< z0uX=z1Rwwb2tWV=5cs)(ac7{)lTdFDO$f4F7kO1!l`qP|f(nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 
0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/no-backups/litestream.yml b/cmd/litestream/testdata/restore/no-backups/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-backups/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/no-backups/stderr b/cmd/litestream/testdata/restore/no-backups/stderr new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/restore/no-backups/stdout b/cmd/litestream/testdata/restore/no-backups/stdout new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/restore/no-generation/litestream.yml b/cmd/litestream/testdata/restore/no-generation/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/no-replicas/litestream.yml b/cmd/litestream/testdata/restore/no-replicas/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/restore/no-replicas/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/restore/no-snapshots/litestream.yml b/cmd/litestream/testdata/restore/no-snapshots/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-snapshots/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/ok/00000002.db b/cmd/litestream/testdata/restore/ok/00000002.db new file mode 100644 index 0000000000000000000000000000000000000000..cfd2b8d8296d0838ebb5cb0d8ab75f3408377b65 GIT binary patch literal 8192 zcmeI#p$Y;)5C-6x**(D^W3x`wB8WbKr(C;K_ z&Ma*D+HO|mJ~XyFosV^}DfLN&=4M1BZO8Em$J4ia8tMKmLgShB;w#oG#X=we0SG_< z0uX=z1Rwwb2tWV=5cs)(ac7{)lTdFDO$f4F7kO1!l`qP|f(nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..37e1dcf92fd018a7fb7c339d5b49ad06aaf564ca GIT binary patch literal 249 zcmZQk@|8#_*!hfsf#HX!c|BVbgYF{<1_l8jpMl}ZKh{^Tmx-k9d&~Iz&dY{!pb(=3 z!)MXfb%#S9-oAG^IM64vBvm0TzbH4cM8O!Si;0QBK|ukij){eVg^3}U5y)iWVGv;m zVBlwB;Q7zM#Gq%QzzZ^n{~iPXJv{~Am>8KjL5$^TXVU@v3L-ZE literal 0 HcmV?d00001 diff --git 
a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..c73bf2cb4dd5c84407c33054dd017d797727b4b0 GIT binary patch literal 94 zcmZQk@|8#_*y+u{z~BJHOe_o^n6CU|ef4^oNE+X)Bp4a~ dqk>xuOh9c67l4`>n3<#NBVhml literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..64a4899be6ce976d393d9b4e84d31dfe6d8b663d GIT binary patch literal 128 zcmZQk@|8#_*qO_~!0<)Vyq>LzLHCga1A_pN&%k))AM5L=vt3OG-ZDP_6Tj>kP>czL zB^ds5o{{bPy*x&>kQXSx%>Rjj{}2CnAO%$L9~ImH3VmP%Dq?101Y#ye5aI;tVql36 GumAuqQY)1J literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..2265d0e07bb8a0f6707a5f7ebc567e634c5caaf2 GIT binary patch literal 125 zcmZQk@|8#_*qOz^!0LzLHCga1A_pN&%ku$AL|=^`&6w%ZyBF2x02Wm5|d!~ z&lM)TyiaP|&h@-N0T%wZ4E%rizw>_rG8h^Dqkw|w#S$@h4H60H0$82JD2f9L3NC!$WM*YzWMyVyVq{@vW?}?lCPonA1Zrn!ojYp-05jAjMgRZ+ literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/output-path-exists/db b/cmd/litestream/testdata/restore/output-path-exists/db new file mode 100644 index 0000000000000000000000000000000000000000..cfd2b8d8296d0838ebb5cb0d8ab75f3408377b65 GIT binary patch literal 8192 zcmeI#p$Y;)5C-6x**(D^W3x`wB8WbKr(C;K_ z&Ma*D+HO|mJ~XyFosV^}DfLN&=4M1BZO8Em$J4ia8tMKmLgShB;w#oG#X=we0SG_< z0uX=z1Rwwb2tWV=5cs)(ac7{)lTdFDO$f4F7kO1!l`qP|f(nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/restore/replica-not-found/litestream.yml b/cmd/litestream/testdata/restore/replica-not-found/litestream.yml new file mode 100644 index 00000000..b2a5e141 --- /dev/null +++ b/cmd/litestream/testdata/restore/replica-not-found/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml 
b/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml b/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/snapshots/ok/Makefile b/cmd/litestream/testdata/snapshots/ok/Makefile new file mode 100644 index 00000000..866903e6 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + diff --git a/cmd/litestream/testdata/snapshots/ok/litestream.yml b/cmd/litestream/testdata/snapshots/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/ok/stdout b/cmd/litestream/testdata/snapshots/ok/stdout new file mode 100644 index 00000000..270a2524 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/stdout @@ -0,0 +1,4 @@ +replica generation index size created +file 0000000000000001 00000000 93 2000-01-03T00:00:00Z +file 0000000000000000 00000001 93 2000-01-02T00:00:00Z +file 0000000000000000 00000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-name/Makefile b/cmd/litestream/testdata/snapshots/replica-name/Makefile 
new file mode 100644 index 00000000..050a241b --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/Makefile @@ -0,0 +1,4 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-name/litestream.yml b/cmd/litestream/testdata/snapshots/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-name/stdout b/cmd/litestream/testdata/snapshots/replica-name/stdout new file mode 100644 index 00000000..42c074e0 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/stdout @@ -0,0 +1,2 @@ +replica generation index size created +replica1 0000000000000001 00000000 93 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml b/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/snapshots/replica-url/Makefile b/cmd/litestream/testdata/snapshots/replica-url/Makefile new file mode 100644 index 00000000..f300c83a --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-url/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 
replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/snapshots/replica-url/stdout b/cmd/litestream/testdata/snapshots/replica-url/stdout new file mode 100644 index 00000000..270a2524 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-url/stdout @@ -0,0 +1,4 @@ +replica generation index size created +file 0000000000000001 00000000 93 2000-01-03T00:00:00Z +file 0000000000000000 00000001 93 2000-01-02T00:00:00Z +file 0000000000000000 00000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/database-not-found/litestream.yml b/cmd/litestream/testdata/wal/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/wal/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/wal/invalid-config/litestream.yml b/cmd/litestream/testdata/wal/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/wal/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/wal/ok/Makefile b/cmd/litestream/testdata/wal/ok/Makefile new file mode 100644 index 00000000..2bb5a8e1 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 + diff --git a/cmd/litestream/testdata/wal/ok/litestream.yml 
b/cmd/litestream/testdata/wal/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/ok/stdout b/cmd/litestream/testdata/wal/ok/stdout new file mode 100644 index 00000000..90a58f59 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/stdout @@ -0,0 +1,5 @@ +replica generation index offset size created +file 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z +file 0000000000000000 00000001 00000000 93 2000-01-03T00:00:00Z +file 0000000000000000 00000000 00000001 93 2000-01-02T00:00:00Z +file 0000000000000000 00000000 00000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-name/Makefile b/cmd/litestream/testdata/wal/replica-name/Makefile new file mode 100644 index 00000000..5556bc8f --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-name/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica1/generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/litestream.yml b/cmd/litestream/testdata/wal/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ 
b/cmd/litestream/testdata/wal/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-name/stdout b/cmd/litestream/testdata/wal/replica-name/stdout new file mode 100644 index 00000000..2e9f9d94 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-name/stdout @@ -0,0 +1,2 @@ +replica generation index offset size created +replica1 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-not-found/litestream.yml b/cmd/litestream/testdata/wal/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/wal/replica-url/Makefile b/cmd/litestream/testdata/wal/replica-url/Makefile new file mode 100644 index 00000000..2bb5a8e1 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-url/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001040000 
replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 + diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/cmd/litestream/testdata/wal/replica-url/stdout b/cmd/litestream/testdata/wal/replica-url/stdout new file mode 100644 index 00000000..90a58f59 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-url/stdout @@ -0,0 +1,5 @@ +replica generation index offset size created +file 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z +file 0000000000000000 00000001 00000000 93 2000-01-03T00:00:00Z +file 0000000000000000 00000000 00000001 93 2000-01-02T00:00:00Z +file 0000000000000000 00000000 00000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/version.go b/cmd/litestream/version.go index 46698610..ccfae6d9 100644 --- a/cmd/litestream/version.go +++ b/cmd/litestream/version.go @@ -4,10 +4,24 @@ import ( "context" "flag" "fmt" + "io" ) // VersionCommand represents a command to print the current version. -type VersionCommand struct{} +type VersionCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer +} + +// NewVersionCommand returns a new instance of VersionCommand. +func NewVersionCommand(stdin io.Reader, stdout, stderr io.Writer) *VersionCommand { + return &VersionCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. 
func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) { @@ -17,14 +31,14 @@ func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) { return err } - fmt.Println(Version) + fmt.Fprintln(c.stdout, Version) return nil } // Usage prints the help screen to STDOUT. func (c *VersionCommand) Usage() { - fmt.Println(` + fmt.Fprintln(c.stdout, ` Prints the version. Usage: diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index d3cc6818..fa28107c 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -4,8 +4,9 @@ import ( "context" "flag" "fmt" + "io" "log" - "os" + "sort" "text/tabwriter" "time" @@ -14,82 +15,63 @@ import ( // WALCommand represents a command to list WAL files for a database. type WALCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + configPath string noExpandEnv bool + + replicaName string + generation string +} + +// NewWALCommand returns a new instance of WALCommand. +func NewWALCommand(stdin io.Reader, stdout, stderr io.Writer) *WALCommand { + return &WALCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } } // Run executes the command. -func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { +func (c *WALCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError) registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) - replicaName := fs.String("replica", "", "replica name") - generation := fs.String("generation", "", "generation name") + fs.StringVar(&c.replicaName, "replica", "", "replica name") + fs.StringVar(&c.generation, "generation", "", "generation name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err } else if fs.NArg() == 0 || fs.Arg(0) == "" { - return fmt.Errorf("database path required") + return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if c.configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if c.configPath == "" { - c.configPath = DefaultConfigPath() - } - - // Load configuration. - config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) - if err != nil { - return err - } - - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err - } - - // Filter by replica, if specified. - if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err } - // Find WAL files by db or replica. - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas + // Build list of replicas from CLI flags. + replicas, _, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err } - // List all WAL files. 
- w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) - defer w.Flush() - - fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated") + // Build list of WAL metadata with associated replica. + var infos []replicaWALSegmentInfo for _, r := range replicas { var generations []string - if *generation != "" { - generations = []string{*generation} + if c.generation != "" { + generations = []string{c.generation} } else { if generations, err = r.Client.Generations(ctx); err != nil { log.Printf("%s: cannot determine generations: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } } @@ -103,31 +85,45 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { defer itr.Close() for itr.Next() { - info := itr.WALSegment() - - fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n", - r.Name(), - info.Generation, - info.Index, - info.Offset, - info.Size, - info.CreatedAt.Format(time.RFC3339), - ) + infos = append(infos, replicaWALSegmentInfo{ + WALSegmentInfo: itr.WALSegment(), + replicaName: r.Name(), + }) } return itr.Close() }(); err != nil { log.Printf("%s: cannot fetch wal segments: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } } } - return nil + // Sort WAL segments by creation time from newest to oldest. + sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) + + // List all WAL files. + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) + defer w.Flush() + + fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated") + for _, info := range infos { + fmt.Fprintf(w, "%s\t%s\t%08x\t%08x\t%d\t%s\n", + info.replicaName, + info.Generation, + info.Index, + info.Offset, + info.Size, + info.CreatedAt.Format(time.RFC3339), + ) + } + + return ret } // Usage prints the help screen to STDOUT. func (c *WALCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The wal command lists all wal segments available for a database. Usage: @@ -166,3 +162,9 @@ Examples: DefaultConfigPath(), ) } + +// replicaWALSegmentInfo represents WAL segment metadata with associated replica name. 
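An editorial aside, not part of the patch: the listing loop above prints the index and offset columns with %08x rather than %d, so the CLI output uses the same zero-padded hexadecimal form as the segment file names in a replica (and as the stdout fixtures under cmd/litestream/testdata). A minimal, self-contained illustration of that formatting, assuming nothing beyond the standard library:

	package main

	import "fmt"

	func main() {
		// Zero-padded hex, matching names such as 00000001.wal.lz4 and 00003068.wal.lz4.
		fmt.Printf("%08x\n", 1)      // 00000001
		fmt.Printf("%08x\n", 0x3068) // 00003068
	}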
+type replicaWALSegmentInfo struct { + litestream.WALSegmentInfo + replicaName string +} diff --git a/cmd/litestream/wal_test.go b/cmd/litestream/wal_test.go new file mode 100644 index 00000000..f313e013 --- /dev/null +++ b/cmd/litestream/wal_test.go @@ -0,0 +1,128 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestWALCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "wal", "replica-url") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || 
!strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "wal", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/internal/internal.go b/internal/internal.go index f8e5c60b..36598d4a 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -7,6 +7,7 @@ import ( "regexp" "strconv" "syscall" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -190,3 +191,30 @@ var ( Help: "The number of bytes used by replica operations", }, []string{"replica_type", "operation"}) ) + +// TruncateDuration truncates d to the nearest major unit (s, ms, µs, ns). 
+func TruncateDuration(d time.Duration) time.Duration {
+	if d < 0 {
+		if d < -10*time.Second {
+			return d.Truncate(time.Second)
+		} else if d < -time.Second {
+			return d.Truncate(time.Second / 10)
+		} else if d < -time.Millisecond {
+			return d.Truncate(time.Millisecond)
+		} else if d < -time.Microsecond {
+			return d.Truncate(time.Microsecond)
+		}
+		return d
+	}
+
+	if d > 10*time.Second {
+		return d.Truncate(time.Second)
+	} else if d > time.Second {
+		return d.Truncate(time.Second / 10)
+	} else if d > time.Millisecond {
+		return d.Truncate(time.Millisecond)
+	} else if d > time.Microsecond {
+		return d.Truncate(time.Microsecond)
+	}
+	return d
+}
diff --git a/internal/internal_test.go b/internal/internal_test.go
index a8eda5d5..5b661dea 100644
--- a/internal/internal_test.go
+++ b/internal/internal_test.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"reflect"
 	"testing"
+	"time"
 
 	"github.com/benbjohnson/litestream/internal"
 )
@@ -59,3 +60,41 @@ func TestParseWALSegmentPath(t *testing.T) {
 		})
 	}
 }
+
+func TestTruncateDuration(t *testing.T) {
+	for _, tt := range []struct {
+		input, output time.Duration
+	}{
+		{0, 0 * time.Nanosecond},
+
+		{1, 1 * time.Nanosecond},
+		{12, 12 * time.Nanosecond},
+		{123, 123 * time.Nanosecond},
+		{1234, 1 * time.Microsecond},
+		{12345, 12 * time.Microsecond},
+		{123456, 123 * time.Microsecond},
+		{1234567, 1 * time.Millisecond},
+		{12345678, 12 * time.Millisecond},
+		{123456789, 123 * time.Millisecond},
+		{1234567890, 1200 * time.Millisecond},
+		{12345678900, 12 * time.Second},
+
+		{-1, -1 * time.Nanosecond},
+		{-12, -12 * time.Nanosecond},
+		{-123, -123 * time.Nanosecond},
+		{-1234, -1 * time.Microsecond},
+		{-12345, -12 * time.Microsecond},
+		{-123456, -123 * time.Microsecond},
+		{-1234567, -1 * time.Millisecond},
+		{-12345678, -12 * time.Millisecond},
+		{-123456789, -123 * time.Millisecond},
+		{-1234567890, -1200 * time.Millisecond},
+		{-12345678900, -12 * time.Second},
+	} {
+		t.Run(fmt.Sprint(int(tt.input)), func(t *testing.T) {
+			if got, want := internal.TruncateDuration(tt.input), tt.output; got != want {
+				t.Fatalf("duration=%s, want %s", got, want)
+			}
+		})
+	}
+}
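An editorial aside, not part of the patch: TruncateDuration drops precision in tiers based on the magnitude of the duration (whole seconds above ten seconds, tenths of a second between one and ten seconds, then whole milliseconds or microseconds for smaller values) rather than truncating to one fixed unit. A small illustrative sketch in the style of the table-driven test above; this Example function is my own addition and is not part of the patch:

	package internal_test

	import (
		"fmt"
		"time"

		"github.com/benbjohnson/litestream/internal"
	)

	// ExampleTruncateDuration demonstrates the tiered truncation.
	func ExampleTruncateDuration() {
		fmt.Println(internal.TruncateDuration(12345678900 * time.Nanosecond)) // above 10s: whole seconds
		fmt.Println(internal.TruncateDuration(1234567890 * time.Nanosecond))  // 1s to 10s: tenths of a second
		fmt.Println(internal.TruncateDuration(12345678 * time.Nanosecond))    // above 1ms: whole milliseconds
		// Output:
		// 12s
		// 1.2s
		// 12ms
	}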
diff --git a/internal/testingutil/testingutil.go b/internal/testingutil/testingutil.go
new file mode 100644
index 00000000..99bc47f4
--- /dev/null
+++ b/internal/testingutil/testingutil.go
@@ -0,0 +1,43 @@
+package testingutil
+
+import (
+	"os"
+	"testing"
+)
+
+// MustReadFile reads all data from filename. Fail on error.
+func MustReadFile(tb testing.TB, filename string) []byte {
+	tb.Helper()
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		tb.Fatal(err)
+	}
+	return b
+}
+
+// Getwd returns the working directory. Fail on error.
+func Getwd(tb testing.TB) string {
+	tb.Helper()
+
+	dir, err := os.Getwd()
+	if err != nil {
+		tb.Fatal(err)
+	}
+	return dir
+}
+
+// Setenv sets the environment variable key to value. The returned function reverts it.
+func Setenv(tb testing.TB, key, value string) func() {
+	tb.Helper()
+
+	prevValue := os.Getenv(key)
+	if err := os.Setenv(key, value); err != nil {
+		tb.Fatal(err)
+	}
+
+	return func() {
+		if err := os.Setenv(key, prevValue); err != nil {
+			tb.Fatal(err)
+		}
+	}
+}
diff --git a/testdata/find-latest-generation/ok/Makefile b/testdata/find-latest-generation/ok/Makefile
index 847b844e..c71ce141 100644
--- a/testdata/find-latest-generation/ok/Makefile
+++ b/testdata/find-latest-generation/ok/Makefile
@@ -1,7 +1,7 @@
 .PHONY: default
 default:
-	TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
-	TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
-	TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4
-	TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4
-	TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
+	TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4
+	TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4
diff --git a/testdata/generation-time-bounds/ok/Makefile b/testdata/generation-time-bounds/ok/Makefile
index 06d50442..e29f9e4e 100644
--- a/testdata/generation-time-bounds/ok/Makefile
+++ b/testdata/generation-time-bounds/ok/Makefile
@@ -1,8 +1,8 @@
 .PHONY: default
 default:
-	TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
-	TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
-	TZ=UTC touch -t 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4
-	TZ=UTC touch -t 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4
-	TZ=UTC touch -t 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
+	TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4
+	TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4
+	TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4
 
diff --git a/testdata/generation-time-bounds/snapshots-only/Makefile b/testdata/generation-time-bounds/snapshots-only/Makefile
index 18b382a8..6405068a 100644
--- a/testdata/generation-time-bounds/snapshots-only/Makefile
+++ b/testdata/generation-time-bounds/snapshots-only/Makefile
@@ -1,5 +1,5 @@
 .PHONY: default
 default:
-	TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
-	TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
+	TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4
+	TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4
 
diff --git a/testdata/max-snapshot-index/ok/Makefile b/testdata/max-snapshot-index/ok/Makefile
index 3d808b7d..d7b4d6c7 100644
--- a/testdata/max-snapshot-index/ok/Makefile
+++ 
b/testdata/max-snapshot-index/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/Makefile b/testdata/replica-client-time-bounds/ok/Makefile index 3d808b7d..d7b4d6c7 100644 --- a/testdata/replica-client-time-bounds/ok/Makefile +++ b/testdata/replica-client-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/snapshot-time-bounds/ok/Makefile b/testdata/snapshot-time-bounds/ok/Makefile index 0a3ea137..6c7e69a0 100644 --- a/testdata/snapshot-time-bounds/ok/Makefile +++ b/testdata/snapshot-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -t 200001030000 generations/0000000000000000/snapshots/00000002.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/00000002.snapshot.lz4 diff --git a/testdata/wal-time-bounds/ok/Makefile b/testdata/wal-time-bounds/ok/Makefile index 875381c9..fa7ab332 100644 --- a/testdata/wal-time-bounds/ok/Makefile +++ b/testdata/wal-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -t 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -t 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -t 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 From 84d08f547a282f16145657c6af994d346c7d5ed3 Mon Sep 17 00:00:00 2001 From: Ben 
Johnson Date: Fri, 14 Jan 2022 15:31:04 -0700 Subject: [PATCH 13/95] Add end-to-end replication/restore testing --- .github/workflows/test.yml | 20 +- cmd/litestream/databases_test.go | 4 +- cmd/litestream/generations.go | 4 +- cmd/litestream/generations_test.go | 8 +- cmd/litestream/main.go | 58 +-- cmd/litestream/main_test.go | 10 +- cmd/litestream/main_windows.go | 2 +- cmd/litestream/replicate.go | 4 +- cmd/litestream/restore.go | 8 +- cmd/litestream/restore_test.go | 8 +- cmd/litestream/snapshots_test.go | 6 +- cmd/litestream/wal.go | 4 +- cmd/litestream/wal_test.go | 6 +- db.go | 25 +- db_test.go | 10 +- integration/cmd_test.go | 411 ++++++++++++++++++ .../replicate/high-load/litestream.yml | 7 + .../replicate/long-running/litestream.yml | 4 + .../testdata/replicate/ok/litestream.yml | 7 + .../litestream.yml | 4 + .../resume-with-new-generation/litestream.yml | 4 + .../testdata/replicate/resume/litestream.yml | 7 + internal/locking_buffer.go | 145 ++++++ internal/testingutil/testingutil.go | 25 +- litestream.go | 10 +- replica.go | 52 ++- replica_test.go | 11 +- 27 files changed, 751 insertions(+), 113 deletions(-) create mode 100644 integration/cmd_test.go create mode 100644 integration/testdata/replicate/high-load/litestream.yml create mode 100644 integration/testdata/replicate/long-running/litestream.yml create mode 100644 integration/testdata/replicate/ok/litestream.yml create mode 100644 integration/testdata/replicate/resume-with-current-generation/litestream.yml create mode 100644 integration/testdata/replicate/resume-with-new-generation/litestream.yml create mode 100644 integration/testdata/replicate/resume/litestream.yml create mode 100644 internal/locking_buffer.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index aabaa6ea..cd533d35 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -29,6 +29,9 @@ jobs: env: LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} + - name: Build binary + run: go install ./cmd/litestream + - name: Run unit tests run: make testdata && go test -v ./... 
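An editorial aside, not part of the patch: the new Build binary step installs litestream before the tests run, presumably so the end-to-end tests added under ./integration can shell out to the installed binary. The actual integration/cmd_test.go is not reproduced in this excerpt; the following is only a rough sketch, under that assumption, of what such a test can look like (all helper names, timings, and the config layout are illustrative, and the database at dbPath is assumed to be created and populated as a SQLite file elsewhere in the test):

	package integration_test

	import (
		"context"
		"os"
		"os/exec"
		"path/filepath"
		"testing"
		"time"
	)

	// TestReplicateRestoreSketch drives the installed litestream binary end to end:
	// replicate a database to a file replica for a short window, then restore it
	// to a separate path and check that a restored file was produced.
	func TestReplicateRestoreSketch(t *testing.T) {
		if _, err := exec.LookPath("litestream"); err != nil {
			t.Skip("litestream binary not installed; run `go install ./cmd/litestream` first")
		}

		dir := t.TempDir()
		dbPath := filepath.Join(dir, "db") // assumed to be created as a SQLite database by the test
		configPath := filepath.Join(dir, "litestream.yml")
		restorePath := filepath.Join(dir, "restored")

		// Point the database at a plain file replica inside the temp directory.
		config := "dbs:\n  - path: " + dbPath + "\n    replicas:\n      - path: " + filepath.Join(dir, "replica") + "\n"
		if err := os.WriteFile(configPath, []byte(config), 0o600); err != nil {
			t.Fatal(err)
		}

		// Replicate for a short window, then stop the process.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		replicate := exec.CommandContext(ctx, "litestream", "replicate", "-config", configPath)
		if err := replicate.Start(); err != nil {
			t.Fatal(err)
		}
		time.Sleep(5 * time.Second) // allow at least one sync to complete
		cancel()
		_ = replicate.Wait()

		// Restore from the replica into a separate path.
		if out, err := exec.Command("litestream", "restore", "-config", configPath, "-o", restorePath, dbPath).CombinedOutput(); err != nil {
			t.Fatalf("restore failed: %v\n%s", err, out)
		}
		if _, err := os.Stat(restorePath); err != nil {
			t.Fatal(err)
		}
	}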
@@ -53,10 +56,13 @@ jobs: LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} LITESTREAM_ABS_BUCKET: integration - - name: Run sftp tests - run: go test -v -run=TestReplicaClient ./integration -replica-type sftp - env: - LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} - LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} - LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 - LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + #- name: Run sftp tests + # run: go test -v -run=TestReplicaClient ./integration -replica-type sftp + # env: + # LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} + # LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} + # LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 + # LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + + - name: Run long-running test + run: go test -v ./integration -long-running-duration 1m diff --git a/cmd/litestream/databases_test.go b/cmd/litestream/databases_test.go index 9499dc67..25aef5ed 100644 --- a/cmd/litestream/databases_test.go +++ b/cmd/litestream/databases_test.go @@ -16,7 +16,7 @@ func TestDatabasesCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -26,7 +26,7 @@ func TestDatabasesCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) diff --git a/cmd/litestream/generations.go b/cmd/litestream/generations.go index da740998..5d237a2e 100644 --- a/cmd/litestream/generations.go +++ b/cmd/litestream/generations.go @@ -74,7 +74,7 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (ret error) fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend") for _, r := range replicas { - generations, err := r.Client.Generations(ctx) + generations, err := r.Client().Generations(ctx) if err != nil { fmt.Fprintf(c.stderr, "%s: cannot list generations: %s", r.Name(), err) ret = errExit // signal error return without printing message @@ -83,7 +83,7 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (ret error) // Iterate over each generation for the replica. 
for _, generation := range generations { - createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client, generation) + createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client(), generation) if err != nil { fmt.Fprintf(c.stderr, "%s: cannot determine generation time bounds: %s", r.Name(), err) ret = errExit // signal error return without printing message diff --git a/cmd/litestream/generations_test.go b/cmd/litestream/generations_test.go index 097bd35b..1da23e43 100644 --- a/cmd/litestream/generations_test.go +++ b/cmd/litestream/generations_test.go @@ -18,7 +18,7 @@ func TestGenerationsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -30,7 +30,7 @@ func TestGenerationsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -43,7 +43,7 @@ func TestGenerationsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"generations", replicaURL}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -55,7 +55,7 @@ func TestGenerationsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 176ec991..db491c40 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -38,6 +38,7 @@ var errExit = errors.New("exit") func main() { log.SetFlags(0) + log.SetOutput(os.Stdout) m := NewMain(os.Stdin, os.Stdout, os.Stderr) if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errExit { @@ -354,55 +355,56 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", c.Path) } - // Build replica. 
- r := litestream.NewReplica(db, c.Name) - if v := c.Retention; v != nil { - r.Retention = *v - } - if v := c.RetentionCheckInterval; v != nil { - r.RetentionCheckInterval = *v - } - if v := c.SyncInterval; v != nil { - r.SyncInterval = *v - } - if v := c.SnapshotInterval; v != nil { - r.SnapshotInterval = *v - } - if v := c.ValidationInterval; v != nil { - r.ValidationInterval = *v - } - // Build and set client on replica. + var client litestream.ReplicaClient switch typ := c.ReplicaType(); typ { case "file": - if r.Client, err = newFileReplicaClientFromConfig(c, r); err != nil { + if client, err = newFileReplicaClientFromConfig(c); err != nil { return nil, err } case "s3": - if r.Client, err = newS3ReplicaClientFromConfig(c, r); err != nil { + if client, err = newS3ReplicaClientFromConfig(c); err != nil { return nil, err } case "gcs": - if r.Client, err = newGCSReplicaClientFromConfig(c, r); err != nil { + if client, err = newGCSReplicaClientFromConfig(c); err != nil { return nil, err } case "abs": - if r.Client, err = newABSReplicaClientFromConfig(c, r); err != nil { + if client, err = newABSReplicaClientFromConfig(c); err != nil { return nil, err } case "sftp": - if r.Client, err = newSFTPReplicaClientFromConfig(c, r); err != nil { + if client, err = newSFTPReplicaClientFromConfig(c); err != nil { return nil, err } default: return nil, fmt.Errorf("unknown replica type in config: %q", typ) } + // Build replica. + r := litestream.NewReplica(db, c.Name, client) + if v := c.Retention; v != nil { + r.Retention = *v + } + if v := c.RetentionCheckInterval; v != nil { + r.RetentionCheckInterval = *v + } + if v := c.SyncInterval; v != nil { + r.SyncInterval = *v + } + if v := c.SnapshotInterval; v != nil { + r.SnapshotInterval = *v + } + if v := c.ValidationInterval; v != nil { + r.ValidationInterval = *v + } + return r, nil } // newFileReplicaClientFromConfig returns a new instance of FileReplicaClient built from config. -func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *litestream.FileReplicaClient, err error) { +func newFileReplicaClientFromConfig(c *ReplicaConfig) (_ *litestream.FileReplicaClient, err error) { // Ensure URL & path are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for file replica") @@ -431,7 +433,7 @@ func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ } // newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config. -func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s3.ReplicaClient, err error) { +func newS3ReplicaClientFromConfig(c *ReplicaConfig) (_ *s3.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for s3 replica") @@ -494,7 +496,7 @@ func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s } // newGCSReplicaClientFromConfig returns a new instance of gcs.ReplicaClient built from config. -func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *gcs.ReplicaClient, err error) { +func newGCSReplicaClientFromConfig(c *ReplicaConfig) (_ *gcs.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. 
if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for gcs replica") @@ -533,7 +535,7 @@ func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ * } // newABSReplicaClientFromConfig returns a new instance of abs.ReplicaClient built from config. -func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *abs.ReplicaClient, err error) { +func newABSReplicaClientFromConfig(c *ReplicaConfig) (_ *abs.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for abs replica") @@ -576,7 +578,7 @@ func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ * } // newSFTPReplicaClientFromConfig returns a new instance of sftp.ReplicaClient built from config. -func newSFTPReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *sftp.ReplicaClient, err error) { +func newSFTPReplicaClientFromConfig(c *ReplicaConfig) (_ *sftp.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for sftp replica") diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index f3e9fb1b..d3d0af8c 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -104,7 +104,7 @@ func TestNewFileReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*litestream.FileReplicaClient); !ok { + } else if client, ok := r.Client().(*litestream.FileReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Path(), "/foo"; got != want { t.Fatalf("Path=%s, want %s", got, want) @@ -116,7 +116,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -135,7 +135,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.localhost:9000/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -154,7 +154,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.s3.us-west-000.backblazeb2.com/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -174,7 +174,7 @@ func TestNewGCSReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gcs://foo/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*gcs.ReplicaClient); !ok { + } else if client, ok := r.Client().(*gcs.ReplicaClient); !ok { 
t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) diff --git a/cmd/litestream/main_windows.go b/cmd/litestream/main_windows.go index d437c1bc..e6276eb4 100644 --- a/cmd/litestream/main_windows.go +++ b/cmd/litestream/main_windows.go @@ -37,7 +37,7 @@ func runWindowsService(ctx context.Context) error { // Set eventlog as log writer while running. log.SetOutput((*eventlogWriter)(elog)) - defer log.SetOutput(os.Stderr) + defer log.SetOutput(os.Stdout) log.Print("Litestream service starting") diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 5d32db74..7f9a9aa4 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -121,7 +121,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { for _, db := range c.DBs { log.Printf("initialized db: %s", db.Path()) for _, r := range db.Replicas { - switch client := r.Client.(type) { + switch client := r.Client().(type) { case *litestream.FileReplicaClient: log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path()) case *s3.ReplicaClient: @@ -173,6 +173,8 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { go func() { c.execCh <- c.cmd.Wait() }() } + log.Printf("litestream initialization complete") + return nil } diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 1a0f5fd2..d2a5d1d0 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -104,7 +104,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { // Determine latest generation if one is not specified. if c.generation == "" { - if c.generation, err = litestream.FindLatestGeneration(ctx, r.Client); err == litestream.ErrNoGeneration { + if c.generation, err = litestream.FindLatestGeneration(ctx, r.Client()); err == litestream.ErrNoGeneration { // Return an error if no matching targets found. // If optional flag set, return success. Useful for automated recovery. if c.ifReplicaExists { @@ -119,14 +119,14 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { // Determine the maximum available index for the generation if one is not specified. if c.targetIndex == -1 { - if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client, c.generation); err != nil { + if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client(), c.generation); err != nil { return fmt.Errorf("cannot determine latest index in generation %q: %w", c.generation, err) } } // Find lastest snapshot that occurs before the index. 
// TODO: Optionally allow -snapshot-index - if c.snapshotIndex, err = litestream.FindSnapshotForIndex(ctx, r.Client, c.generation, c.targetIndex); err != nil { + if c.snapshotIndex, err = litestream.FindSnapshotForIndex(ctx, r.Client(), c.generation, c.targetIndex); err != nil { return fmt.Errorf("cannot find snapshot index: %w", err) } @@ -137,7 +137,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { c.opt.Logger = log.New(c.stdout, "", log.LstdFlags|log.Lmicroseconds) - return litestream.Restore(ctx, r.Client, c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt) + return litestream.Restore(ctx, r.Client(), c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt) } func (c *RestoreCommand) loadReplica(ctx context.Context, config Config, arg string) (*litestream.Replica, error) { diff --git a/cmd/litestream/restore_test.go b/cmd/litestream/restore_test.go index 4d0770cf..9469b5a1 100644 --- a/cmd/litestream/restore_test.go +++ b/cmd/litestream/restore_test.go @@ -120,7 +120,7 @@ func TestRestoreCommand(t *testing.T) { err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-db-not-exists", filepath.Join(testDir, "db")}) if err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -133,7 +133,7 @@ func TestRestoreCommand(t *testing.T) { err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-replica-exists", filepath.Join(testDir, "db")}) if err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -147,9 +147,9 @@ func TestRestoreCommand(t *testing.T) { err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}) if err == nil || err.Error() != `no matching backups found` { t.Fatalf("unexpected error: %s", err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) - } else if got, want := stderr.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stderr"))); got != want { + } else if got, want := stderr.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stderr"))); got != want { t.Fatalf("stderr=%q, want %q", got, want) } }) diff --git a/cmd/litestream/snapshots_test.go b/cmd/litestream/snapshots_test.go index 3dc92884..f845cdc6 100644 --- a/cmd/litestream/snapshots_test.go +++ b/cmd/litestream/snapshots_test.go @@ -18,7 +18,7 @@ func TestSnapshotsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), 
string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -30,7 +30,7 @@ func TestSnapshotsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -43,7 +43,7 @@ func TestSnapshotsCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"snapshots", replicaURL}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index fa28107c..1124c030 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -69,7 +69,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (ret error) { if c.generation != "" { generations = []string{c.generation} } else { - if generations, err = r.Client.Generations(ctx); err != nil { + if generations, err = r.Client().Generations(ctx); err != nil { log.Printf("%s: cannot determine generations: %s", r.Name(), err) ret = errExit // signal error return without printing message continue @@ -78,7 +78,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (ret error) { for _, generation := range generations { if err := func() error { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.Client().WALSegments(ctx, generation) if err != nil { return err } diff --git a/cmd/litestream/wal_test.go b/cmd/litestream/wal_test.go index f313e013..6fbe0b02 100644 --- a/cmd/litestream/wal_test.go +++ b/cmd/litestream/wal_test.go @@ -18,7 +18,7 @@ func TestWALCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -30,7 +30,7 @@ func TestWALCommand(t *testing.T) { m, _, stdout, _ := newMain() if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) @@ -43,7 +43,7 @@ func TestWALCommand(t *testing.T) { m, _, stdout, _ := 
newMain() if err := m.Run(context.Background(), []string{"wal", replicaURL}); err != nil { t.Fatal(err) - } else if got, want := stdout.String(), string(testingutil.MustReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { t.Fatalf("stdout=%q, want %q", got, want) } }) diff --git a/db.go b/db.go index f56bdc57..ed5a97f0 100644 --- a/db.go +++ b/db.go @@ -121,7 +121,7 @@ func NewDB(path string) *DB { CheckpointInterval: DefaultCheckpointInterval, MonitorInterval: DefaultMonitorInterval, - Logger: log.New(LogWriter, fmt.Sprintf("%s: ", path), LogFlags), + Logger: log.New(LogWriter, fmt.Sprintf("%s: ", logPrefixPath(path)), LogFlags), } db.dbSizeGauge = dbSizeGaugeVec.WithLabelValues(db.path) @@ -300,7 +300,7 @@ func (db *DB) invalidateChecksum(ctx context.Context) error { r := &io.LimitedReader{R: rc, N: db.pos.Offset} // Determine cache values from the current WAL file. - db.salt0, db.salt1, db.chksum0, db.chksum1, db.byteOrder, db.frame, err = ReadWALFields(r, db.pageSize) + db.salt0, db.salt1, db.chksum0, db.chksum1, db.byteOrder, db.hdr, db.frame, err = ReadWALFields(r, db.pageSize) if err != nil { return fmt.Errorf("calc checksum: %w", err) } @@ -1621,11 +1621,11 @@ var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) // Returns salt, checksum, byte order & the last frame. WAL data must start // from the beginning of the WAL header and must end on either the WAL header // or at the end of a WAL frame. -func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 uint32, byteOrder binary.ByteOrder, frame []byte, err error) { +func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 uint32, byteOrder binary.ByteOrder, hdr, frame []byte, err error) { // Read header. - hdr := make([]byte, WALHeaderSize) + hdr = make([]byte, WALHeaderSize) if _, err := io.ReadFull(r, hdr); err != nil { - return 0, 0, 0, 0, nil, nil, fmt.Errorf("short wal header: %w", err) + return 0, 0, 0, 0, nil, nil, nil, fmt.Errorf("short wal header: %w", err) } // Save salt, initial checksum, & byte order. @@ -1634,7 +1634,7 @@ func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 ui chksum0 = binary.BigEndian.Uint32(hdr[24:]) chksum1 = binary.BigEndian.Uint32(hdr[28:]) if byteOrder, err = headerByteOrder(hdr); err != nil { - return 0, 0, 0, 0, nil, nil, err + return 0, 0, 0, 0, nil, nil, nil, err } // Iterate over each page in the WAL and save the checksum. @@ -1645,7 +1645,7 @@ func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 ui if n, err := io.ReadFull(r, frame); err == io.EOF { break // end of WAL file } else if err != nil { - return 0, 0, 0, 0, nil, nil, fmt.Errorf("short wal frame (n=%d): %w", n, err) + return 0, 0, 0, 0, nil, nil, nil, fmt.Errorf("short wal frame (n=%d): %w", n, err) } // Update checksum on each successful frame. @@ -1659,7 +1659,7 @@ func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 ui frame = nil } - return salt0, salt1, chksum0, chksum1, byteOrder, frame, nil + return salt0, salt1, chksum0, chksum1, byteOrder, hdr, frame, nil } // Database metrics. @@ -1731,3 +1731,12 @@ func headerByteOrder(hdr []byte) (binary.ByteOrder, error) { return nil, fmt.Errorf("invalid wal header magic: %x", magic) } } + +// logPrefixPath returns the path to be used for logging. +// The path is reduced to its base if it appears to be a temporary test path. 
+func logPrefixPath(path string) string { + if strings.Contains(path, "TestCmd") { + return filepath.Base(path) + } + return path +} diff --git a/db_test.go b/db_test.go index 5c3f51c1..fe742251 100644 --- a/db_test.go +++ b/db_test.go @@ -482,7 +482,7 @@ func TestReadWALFields(t *testing.T) { } t.Run("OK", func(t *testing.T) { - if salt0, salt1, chksum0, chksum1, byteOrder, frame, err := litestream.ReadWALFields(bytes.NewReader(b), 4096); err != nil { + if salt0, salt1, chksum0, chksum1, byteOrder, _, frame, err := litestream.ReadWALFields(bytes.NewReader(b), 4096); err != nil { t.Fatal(err) } else if got, want := salt0, uint32(0x4F7598FD); got != want { t.Fatalf("salt0=%x, want %x", got, want) @@ -500,7 +500,7 @@ func TestReadWALFields(t *testing.T) { }) t.Run("HeaderOnly", func(t *testing.T) { - if salt0, salt1, chksum0, chksum1, byteOrder, frame, err := litestream.ReadWALFields(bytes.NewReader(b[:32]), 4096); err != nil { + if salt0, salt1, chksum0, chksum1, byteOrder, _, frame, err := litestream.ReadWALFields(bytes.NewReader(b[:32]), 4096); err != nil { t.Fatal(err) } else if got, want := salt0, uint32(0x4F7598FD); got != want { t.Fatalf("salt0=%x, want %x", got, want) @@ -518,19 +518,19 @@ func TestReadWALFields(t *testing.T) { }) t.Run("ErrShortHeader", func(t *testing.T) { - if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader([]byte{}), 4096); err == nil || err.Error() != `short wal header: EOF` { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader([]byte{}), 4096); err == nil || err.Error() != `short wal header: EOF` { t.Fatal(err) } }) t.Run("ErrBadMagic", func(t *testing.T) { - if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(make([]byte, 32)), 4096); err == nil || err.Error() != `invalid wal header magic: 0` { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(make([]byte, 32)), 4096); err == nil || err.Error() != `invalid wal header magic: 0` { t.Fatal(err) } }) t.Run("ErrShortFrame", func(t *testing.T) { - if _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(b[:100]), 4096); err == nil || err.Error() != `short wal frame (n=68): unexpected EOF` { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(b[:100]), 4096); err == nil || err.Error() != `short wal frame (n=68): unexpected EOF` { t.Fatal(err) } }) diff --git a/integration/cmd_test.go b/integration/cmd_test.go new file mode 100644 index 00000000..ab9c1c06 --- /dev/null +++ b/integration/cmd_test.go @@ -0,0 +1,411 @@ +package integration_test + +import ( + "bytes" + "context" + "database/sql" + "flag" + "fmt" + "io" + "math/rand" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/benbjohnson/litestream/internal/testingutil" + _ "github.com/mattn/go-sqlite3" +) + +var longRunningDuration = flag.Duration("long-running-duration", 0, "") + +func init() { + fmt.Fprintln(os.Stderr, "# ") + fmt.Fprintln(os.Stderr, "# NOTE: Build litestream to your PATH before running integration tests") + fmt.Fprintln(os.Stderr, "#") + fmt.Fprintln(os.Stderr, "") +} + +// Ensure the default configuration works with light database load. 
+func TestCmd_Replicate_OK(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "ok"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes periodically. + for i := 0; i < 100; i++ { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + time.Sleep(10 * time.Millisecond) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd, stdout) + + // Ensure signal and shutdown are logged. + if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) { + t.Fatal("missing log output for signal received") + } else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) { + t.Fatal("missing log output for shut down") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure that stopping and restarting Litestream before an application-induced +// checkpoint will cause Litestream to continue replicating using the same generation. +func TestCmd_Replicate_ResumeWithCurrentGeneration(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-current-generation"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + t.Log("writing to database during replication") + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute a few writes to populate the WAL. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil { + t.Fatal(err) + } + + // Wait for replication to occur & shutdown. + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + t.Log("replication shutdown, continuing database writes") + + // Execute a few more writes while replication is stopped. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil { + t.Fatal(err) + } + + t.Log("restarting replication") + + cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + + t.Log("replication shutdown again") + + // Litestream should resume replication from the previous generation. 
+ if s := stdout.String(); strings.Contains(s, "no generation exists") { + t.Fatal("expected existing generation to resume; started new generation instead") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure that restarting Litestream after a full checkpoint has occurred will +// cause it to begin a new generation. +func TestCmd_Replicate_ResumeWithNewGeneration(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-new-generation"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + t.Log("writing to database during replication") + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute a few writes to populate the WAL. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil { + t.Fatal(err) + } + + // Wait for replication to occur & shutdown. + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + t.Log("replication shutdown, continuing database writes") + + // Execute a few more writes while replication is stopped. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil { + t.Fatal(err) + } + + t.Log("issuing checkpoint") + + // Issue a checkpoint to restart WAL. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(RESTART)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (5)`); err != nil { + t.Fatal(err) + } + + t.Log("restarting replication") + + cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + + t.Log("replication shutdown again") + + // Litestream should start a new generation after the checkpoint. + if s := stdout.String(); !strings.Contains(s, "no generation exists") { + t.Fatal("expected new generation to start; continued existing generation instead") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure the default configuration works with heavy write load.
+func TestCmd_Replicate_HighLoad(t *testing.T) { + if testing.Short() { + t.Skip("short mode enabled, skipping") + } + + const writeDuration = 30 * time.Second + + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "high-load"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA wal_autocheckpoint = 0`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes as fast as possible for a period of time. + timer := time.NewTimer(writeDuration) + defer timer.Stop() + + t.Logf("executing writes for %s", writeDuration) + +LOOP: + for i := 0; ; i++ { + select { + case <-timer.C: + break LOOP + default: + if i%1000 == 0 { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + } + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + } + } + + t.Logf("writes complete, shutting down") + + // Stop & wait for Litestream command. + time.Sleep(5 * time.Second) + killLitestreamCmd(t, cmd, stdout) + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure replication works for an extended period. +func TestCmd_Replicate_LongRunning(t *testing.T) { + if *longRunningDuration == 0 { + t.Skip("long running test duration not specified, skipping") + } + + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "long-running"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes as fast as possible for a period of time. + timer := time.NewTimer(*longRunningDuration) + defer timer.Stop() + + t.Logf("executing writes for %s", longRunningDuration) + +LOOP: + for i := 0; ; i++ { + select { + case <-timer.C: + break LOOP + default: + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + + time.Sleep(time.Duration(rand.Intn(int(time.Second)))) + } + } + + t.Logf("writes complete, shutting down") + + // Stop & wait for Litestream command. 
+ killLitestreamCmd(t, cmd, stdout) + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// commandContext returns a "litestream" command with stdout/stderr buffers. +func commandContext(ctx context.Context, env []string, arg ...string) (cmd *exec.Cmd, stdout, stderr *internal.LockingBuffer) { + cmd = exec.CommandContext(ctx, "litestream", arg...) + cmd.Env = env + var outBuf, errBuf internal.LockingBuffer + + // Split stdout/stderr to terminal if verbose flag set. + cmd.Stdout, cmd.Stderr = &outBuf, &errBuf + if testing.Verbose() { + cmd.Stdout = io.MultiWriter(&outBuf, os.Stdout) + cmd.Stderr = io.MultiWriter(&errBuf, os.Stderr) + } + + return cmd, &outBuf, &errBuf +} + +// waitForLogMessage continuously checks b for a message and returns when it occurs. +func waitForLogMessage(tb testing.TB, b *internal.LockingBuffer, msg string) { + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-timer.C: + tb.Fatalf("timed out waiting for log message: %q", msg) + case <-ticker.C: + if strings.Contains(b.String(), msg) { + return + } + } + } +} + +// killLitestreamCmd interrupts the process and waits for a clean shutdown. +func killLitestreamCmd(tb testing.TB, cmd *exec.Cmd, stdout *internal.LockingBuffer) { + if err := cmd.Process.Signal(os.Interrupt); err != nil { + tb.Fatal(err) + } else if err := cmd.Wait(); err != nil { + tb.Fatal(err) + } +} + +// restoreAndVerify executes a "restore" and compares bytes with the original database. +func restoreAndVerify(tb testing.TB, ctx context.Context, env []string, configPath, dbPath string) { + restorePath := filepath.Join(tb.TempDir(), "db") + + // Restore database. + cmd, _, _ := commandContext(ctx, env, "restore", "-config", configPath, "-o", restorePath, dbPath) + if err := cmd.Run(); err != nil { + tb.Fatalf("error running 'restore' command: %s", err) + } + + // Compare original database & restored database. + buf0 := testingutil.ReadFile(tb, dbPath) + buf1 := testingutil.ReadFile(tb, restorePath) + if bytes.Equal(buf0, buf1) { + return // ok, exit + } + + // On mismatch, copy out original & restored DBs.
+ dir, err := os.MkdirTemp("", "litestream-*") + if err != nil { + tb.Fatal(err) + } + testingutil.CopyFile(tb, dbPath, filepath.Join(dir, "original.db")) + testingutil.CopyFile(tb, restorePath, filepath.Join(dir, "restored.db")) + + tb.Fatalf("database mismatch; databases copied to %s", dir) +} diff --git a/integration/testdata/replicate/high-load/litestream.yml b/integration/testdata/replicate/high-load/litestream.yml new file mode 100644 index 00000000..26fb1195 --- /dev/null +++ b/integration/testdata/replicate/high-load/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + monitor-interval: 100ms + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/long-running/litestream.yml b/integration/testdata/replicate/long-running/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/long-running/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/ok/litestream.yml b/integration/testdata/replicate/ok/litestream.yml new file mode 100644 index 00000000..26fb1195 --- /dev/null +++ b/integration/testdata/replicate/ok/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + monitor-interval: 100ms + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/resume-with-current-generation/litestream.yml b/integration/testdata/replicate/resume-with-current-generation/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/resume-with-current-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/resume-with-new-generation/litestream.yml b/integration/testdata/replicate/resume-with-new-generation/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/resume-with-new-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/resume/litestream.yml b/integration/testdata/replicate/resume/litestream.yml new file mode 100644 index 00000000..0bdd84ed --- /dev/null +++ b/integration/testdata/replicate/resume/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + monitor-interval: 100ms + max-checkpoint-page-count: 10 diff --git a/internal/locking_buffer.go b/internal/locking_buffer.go new file mode 100644 index 00000000..5a95df92 --- /dev/null +++ b/internal/locking_buffer.go @@ -0,0 +1,145 @@ +package internal + +import ( + "bytes" + "io" + "sync" +) + +// LockingBuffer wraps a bytes.Buffer with a mutex. 
+type LockingBuffer struct { + mu sync.Mutex + b bytes.Buffer +} + +func (b *LockingBuffer) Bytes() []byte { + b.mu.Lock() + defer b.mu.Unlock() + buf := b.b.Bytes() + other := make([]byte, len(buf)) + copy(other, buf) + return other +} + +func (b *LockingBuffer) Cap() int { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Cap() +} + +func (b *LockingBuffer) Grow(n int) { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Grow(n) +} + +func (b *LockingBuffer) Len() int { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Len() +} + +func (b *LockingBuffer) Next(n int) []byte { + b.mu.Lock() + defer b.mu.Unlock() + buf := b.b.Next(n) + other := make([]byte, len(buf)) + copy(other, buf) + return other +} + +func (b *LockingBuffer) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Read(p) +} + +func (b *LockingBuffer) ReadByte() (byte, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadByte() +} + +func (b *LockingBuffer) ReadBytes(delim byte) (line []byte, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadBytes(delim) +} + +func (b *LockingBuffer) ReadFrom(r io.Reader) (n int64, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadFrom(r) +} + +func (b *LockingBuffer) ReadRune() (r rune, size int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadRune() +} + +func (b *LockingBuffer) ReadString(delim byte) (line string, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadString(delim) +} + +func (b *LockingBuffer) Reset() { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Reset() +} + +func (b *LockingBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.String() +} + +func (b *LockingBuffer) Truncate(n int) { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Truncate(n) +} + +func (b *LockingBuffer) UnreadByte() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.UnreadByte() +} + +func (b *LockingBuffer) UnreadRune() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.UnreadRune() +} + +func (b *LockingBuffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Write(p) +} + +func (b *LockingBuffer) WriteByte(c byte) error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteByte(c) +} + +func (b *LockingBuffer) WriteRune(r rune) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteRune(r) +} + +func (b *LockingBuffer) WriteString(s string) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteString(s) +} + +func (b *LockingBuffer) WriteTo(w io.Writer) (n int64, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteTo(w) +} diff --git a/internal/testingutil/testingutil.go b/internal/testingutil/testingutil.go index 99bc47f4..22636f27 100644 --- a/internal/testingutil/testingutil.go +++ b/internal/testingutil/testingutil.go @@ -1,12 +1,13 @@ package testingutil import ( + "io" "os" "testing" ) -// MustReadFile reads all data from filename. Fail on error. -func MustReadFile(tb testing.TB, filename string) []byte { +// ReadFile reads all data from filename. Fail on error. +func ReadFile(tb testing.TB, filename string) []byte { tb.Helper() b, err := os.ReadFile(filename) if err != nil { @@ -15,6 +16,26 @@ func MustReadFile(tb testing.TB, filename string) []byte { return b } +// CopyFile copies all data from src to dst. Fail on error. 
+func CopyFile(tb testing.TB, src, dst string) { + tb.Helper() + r, err := os.Open(src) + if err != nil { + tb.Fatal(err) + } + defer r.Close() + + w, err := os.Create(dst) + if err != nil { + tb.Fatal(err) + } + defer w.Close() + + if _, err := io.Copy(w, r); err != nil { + tb.Fatal(err) + } +} + // Getpwd returns the working directory. Fail on error. func Getwd(tb testing.TB) string { tb.Helper() diff --git a/litestream.go b/litestream.go index e962f141..98f94a8b 100644 --- a/litestream.go +++ b/litestream.go @@ -1,8 +1,10 @@ package litestream import ( + "crypto/md5" "database/sql" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" @@ -43,7 +45,7 @@ var ( var ( // LogWriter is the destination writer for all logging. - LogWriter = os.Stderr + LogWriter = os.Stdout // LogFlags are the flags passed to log.New(). LogFlags = 0 @@ -460,6 +462,12 @@ func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') } +// md5hash returns a hex-encoded MD5 hash of b. +func md5hash(b []byte) string { + sum := md5.Sum(b) + return hex.EncodeToString(sum[:]) +} + // Tracef is used for low-level tracing. var Tracef = func(format string, a ...interface{}) {} diff --git a/replica.go b/replica.go index 67e9d141..8547e61e 100644 --- a/replica.go +++ b/replica.go @@ -43,7 +43,7 @@ type Replica struct { cancel func() // Client used to connect to the remote replica. - Client ReplicaClient + client ReplicaClient // Time between syncs with the shadow WAL. SyncInterval time.Duration @@ -68,10 +68,11 @@ type Replica struct { Logger *log.Logger } -func NewReplica(db *DB, name string) *Replica { +func NewReplica(db *DB, name string, client ReplicaClient) *Replica { r := &Replica{ db: db, name: name, + client: client, cancel: func() {}, SyncInterval: DefaultSyncInterval, @@ -82,7 +83,7 @@ func NewReplica(db *DB, name string) *Replica { prefix := fmt.Sprintf("%s: ", r.Name()) if db != nil { - prefix = fmt.Sprintf("%s(%s): ", db.Path(), r.Name()) + prefix = fmt.Sprintf("%s(%s): ", logPrefixPath(db.Path()), r.Name()) } r.Logger = log.New(LogWriter, prefix, LogFlags) @@ -91,8 +92,8 @@ func NewReplica(db *DB, name string) *Replica { // Name returns the name of the replica. func (r *Replica) Name() string { - if r.name == "" && r.Client != nil { - return r.Client.Type() + if r.name == "" && r.client != nil { + return r.client.Type() } return r.name } @@ -100,6 +101,9 @@ func (r *Replica) Name() string { // DB returns a reference to the database the replica is attached to, if any. func (r *Replica) DB() *DB { return r.db } +// Client returns the client the replica was initialized with. +func (r *Replica) Client() ReplicaClient { return r.client } + // Starts replicating in a background goroutine. func (r *Replica) Start(ctx context.Context) error { // Ignore if replica is being used sychronously. @@ -265,7 +269,7 @@ func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentI // Copy through pipe into client from the starting position. var g errgroup.Group g.Go(func() error { - _, err := r.Client.WriteWALSegment(ctx, initialPos, pr) + _, err := r.client.WriteWALSegment(ctx, initialPos, pr) return err }) @@ -332,7 +336,7 @@ func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentI // snapshotN returns the number of snapshots for a generation. 
func (r *Replica) snapshotN(generation string) (int, error) { - itr, err := r.Client.Snapshots(context.Background(), generation) + itr, err := r.client.Snapshots(context.Background(), generation) if err != nil { return 0, err } @@ -364,7 +368,7 @@ func (r *Replica) calcPos(ctx context.Context, generation string) (pos Pos, err } // Read segment to determine size to add to offset. - rd, err := r.Client.WALSegmentReader(ctx, segment.Pos()) + rd, err := r.client.WALSegmentReader(ctx, segment.Pos()) if err != nil { return pos, fmt.Errorf("wal segment reader: %w", err) } @@ -385,7 +389,7 @@ func (r *Replica) calcPos(ctx context.Context, generation string) (pos Pos, err // maxSnapshot returns the last snapshot in a generation. func (r *Replica) maxSnapshot(ctx context.Context, generation string) (*SnapshotInfo, error) { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return nil, err } @@ -402,7 +406,7 @@ func (r *Replica) maxSnapshot(ctx context.Context, generation string) (*Snapshot // maxWALSegment returns the highest WAL segment in a generation. func (r *Replica) maxWALSegment(ctx context.Context, generation string) (*WALSegmentInfo, error) { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.client.WALSegments(ctx, generation) if err != nil { return nil, err } @@ -427,7 +431,7 @@ func (r *Replica) Pos() Pos { // Snapshots returns a list of all snapshots across all generations. func (r *Replica) Snapshots(ctx context.Context) ([]SnapshotInfo, error) { - generations, err := r.Client.Generations(ctx) + generations, err := r.client.Generations(ctx) if err != nil { return nil, fmt.Errorf("cannot fetch generations: %w", err) } @@ -435,7 +439,7 @@ func (r *Replica) Snapshots(ctx context.Context) ([]SnapshotInfo, error) { var a []SnapshotInfo for _, generation := range generations { if err := func() error { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return err } @@ -518,7 +522,7 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { }) // Delegate write to client & wait for writer goroutine to finish. - if info, err = r.Client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil { + if info, err = r.client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil { return info, err } else if err := g.Wait(); err != nil { return info, err @@ -549,7 +553,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { } // Loop over generations and delete unretained snapshots & WAL files. - generations, err := r.Client.Generations(ctx) + generations, err := r.client.Generations(ctx) if err != nil { return fmt.Errorf("generations: %w", err) } @@ -559,7 +563,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { // Delete entire generation if no snapshots are being retained. 
if snapshot == nil { - if err := r.Client.DeleteGeneration(ctx, generation); err != nil { + if err := r.client.DeleteGeneration(ctx, generation); err != nil { return fmt.Errorf("delete generation: %w", err) } continue @@ -577,7 +581,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { } func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation string, index int) error { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return fmt.Errorf("fetch snapshots: %w", err) } @@ -589,7 +593,7 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str continue } - if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { + if err := r.client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err) } r.Logger.Printf("snapshot deleted %s/%08x", generation, index) @@ -599,7 +603,7 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str } func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation string, index int) error { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.client.WALSegments(ctx, generation) if err != nil { return fmt.Errorf("fetch wal segments: %w", err) } @@ -621,7 +625,7 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s return nil } - if err := r.Client.DeleteWALSegments(ctx, a); err != nil { + if err := r.client.DeleteWALSegments(ctx, a); err != nil { return fmt.Errorf("delete wal segments: %w", err) } @@ -774,7 +778,7 @@ func (r *Replica) Validate(ctx context.Context) error { } // Find lastest snapshot that occurs before the index. - snapshotIndex, err := FindSnapshotForIndex(ctx, r.Client, pos.Generation, pos.Index-1) + snapshotIndex, err := FindSnapshotForIndex(ctx, r.client, pos.Generation, pos.Index-1) if err != nil { return fmt.Errorf("cannot find snapshot index: %w", err) } @@ -784,7 +788,7 @@ func (r *Replica) Validate(ctx context.Context) error { Logger: log.New(os.Stderr, "", 0), LogPrefix: r.logPrefix(), } - if err := Restore(ctx, r.Client, restorePath, pos.Generation, snapshotIndex, pos.Index-1, opt); err != nil { + if err := Restore(ctx, r.client, restorePath, pos.Generation, snapshotIndex, pos.Index-1, opt); err != nil { return fmt.Errorf("cannot restore: %w", err) } @@ -880,7 +884,7 @@ func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error { func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (time.Time, error) { var min time.Time - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return min, err } @@ -897,7 +901,7 @@ func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (t // SnapshotIndexAt returns the highest index for a snapshot within a generation // that occurs before timestamp. If timestamp is zero, returns the latest snapshot. 
func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return 0, err } @@ -929,7 +933,7 @@ func LatestReplica(ctx context.Context, replicas []*Replica) (*Replica, error) { var t time.Time var r *Replica for i := range replicas { - _, max, err := ReplicaClientTimeBounds(ctx, replicas[i].Client) + _, max, err := ReplicaClientTimeBounds(ctx, replicas[i].client) if err != nil { return nil, err } else if r == nil || max.After(t) { diff --git a/replica_test.go b/replica_test.go index a0220bbb..97455acc 100644 --- a/replica_test.go +++ b/replica_test.go @@ -14,13 +14,12 @@ import ( func TestReplica_Name(t *testing.T) { t.Run("WithName", func(t *testing.T) { - if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want { + if got, want := litestream.NewReplica(nil, "NAME", nil).Name(), "NAME"; got != want { t.Fatalf("Name()=%v, want %v", got, want) } }) t.Run("WithoutName", func(t *testing.T) { - r := litestream.NewReplica(nil, "") - r.Client = &mock.ReplicaClient{} + r := litestream.NewReplica(nil, "", &mock.ReplicaClient{}) if got, want := r.Name(), "mock"; got != want { t.Fatalf("Name()=%v, want %v", got, want) } @@ -45,8 +44,7 @@ func TestReplica_Sync(t *testing.T) { dpos := db.Pos() c := litestream.NewFileReplicaClient(t.TempDir()) - r := litestream.NewReplica(db, "") - r.Client = c + r := litestream.NewReplica(db, "", c) if err := r.Sync(context.Background()); err != nil { t.Fatal(err) @@ -81,8 +79,7 @@ func TestReplica_Snapshot(t *testing.T) { defer MustCloseDBs(t, db, sqldb) c := litestream.NewFileReplicaClient(t.TempDir()) - r := litestream.NewReplica(db, "") - r.Client = c + r := litestream.NewReplica(db, "", c) // Execute a query to force a write to the WAL. 
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { From b8536fa4f3e801495c8366f17fb5dfc7a8ed752b Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 22 Jan 2022 10:02:32 -0700 Subject: [PATCH 14/95] dependabot.yml --- .github/dependabot.yml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..eed53854 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + assignees: + - "benbjohnson" + schedule: + interval: "daily" From c7aa3635fda890ae05eda4e321240bb721f8ac51 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 21 Jan 2022 15:27:06 -0700 Subject: [PATCH 15/95] Parallelize GitHub Actions --- .github/workflows/commit.yml | 123 +++++++++++++++++++++++++++++++++++ .github/workflows/test.yml | 68 ------------------- 2 files changed, 123 insertions(+), 68 deletions(-) create mode 100644 .github/workflows/commit.yml delete mode 100644 .github/workflows/test.yml diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml new file mode 100644 index 00000000..846da222 --- /dev/null +++ b/.github/workflows/commit.yml @@ -0,0 +1,123 @@ +on: push + +jobs: + build: + name: Build & Unit Test + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go env + + - run: go install ./cmd/litestream + + - run: make testdata + - run: go test -v ./... + + - name: Build integration test + run: go test -c ./integration + + - uses: actions/upload-artifact@v2 + with: + name: integration.test + path: integration.test + if-no-files-found: error + + long-running-test: + name: Run Long Running Unit Test + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m + + s3-integration-test: + name: Run S3 Integration Tests + runs-on: ubuntu-18.04 + needs: build + steps: + - uses: actions/download-artifact@v2 + with: + name: integration.test + - run: chmod +x integration.test + + - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type s3 + env: + LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} + LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} + LITESTREAM_S3_REGION: us-east-1 + LITESTREAM_S3_BUCKET: integration.litestream.io + + gcp-integration-test: + name: Run GCP Integration Tests + runs-on: ubuntu-18.04 + needs: build + steps: + - name: Extract GCP credentials + run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' + shell: bash + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} + + - uses: actions/download-artifact@v2 + with: + name: integration.test + - run: chmod +x integration.test + + - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type gcs + env: + GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json + LITESTREAM_GCS_BUCKET: integration.litestream.io + + abs-integration-test: + name: 
Run Azure Blob Store Integration Tests + runs-on: ubuntu-18.04 + needs: build + steps: + - uses: actions/download-artifact@v2 + with: + name: integration.test + - run: chmod +x integration.test + + - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type abs + env: + LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} + LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} + LITESTREAM_ABS_BUCKET: integration + +# sftp-integration-test: +# name: Run SFTP Integration Tests +# runs-on: ubuntu-18.04 +# needs: build +# steps: +# - name: Extract SSH key +# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' +# shell: bash +# env: +# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} +# +# - name: Run sftp tests +# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp +# env: +# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} +# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} +# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 +# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index cd533d35..00000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,68 +0,0 @@ -on: push -name: test -jobs: - test: - runs-on: ubuntu-18.04 - steps: - - uses: actions/setup-go@v2 - with: - go-version: '1.16' - - - uses: actions/checkout@v2 - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - - name: Extract GCP credentials - run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' - shell: bash - env: - GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - - name: Extract SSH key - run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' - shell: bash - env: - LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} - - - name: Build binary - run: go install ./cmd/litestream - - - name: Run unit tests - run: make testdata && go test -v ./... 
- - - name: Run aws s3 tests - run: go test -v -run=TestReplicaClient ./integration -replica-type s3 - env: - LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} - LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: us-east-1 - LITESTREAM_S3_BUCKET: integration.litestream.io - - - name: Run google cloud storage (gcs) tests - run: go test -v -run=TestReplicaClient ./integration -replica-type gcs - env: - GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: integration.litestream.io - - - name: Run azure blob storage (abs) tests - run: go test -v -run=TestReplicaClient ./integration -replica-type abs - env: - LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} - LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: integration - - #- name: Run sftp tests - # run: go test -v -run=TestReplicaClient ./integration -replica-type sftp - # env: - # LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} - # LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} - # LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 - # LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} - - - name: Run long-running test - run: go test -v ./integration -long-running-duration 1m From 0a6474fb2855df0d94b321ac4fa2c1bc3d1e977b Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 22 Jan 2022 11:00:38 -0700 Subject: [PATCH 16/95] Restrict CI jobs --- .github/workflows/commit.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 846da222..e58491a5 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -50,6 +50,7 @@ jobs: s3-integration-test: name: Run S3 Integration Tests + if: ${{ github.actor == 'benbjohnson' }} runs-on: ubuntu-18.04 needs: build steps: @@ -67,6 +68,7 @@ jobs: gcp-integration-test: name: Run GCP Integration Tests + if: ${{ github.actor == 'benbjohnson' }} runs-on: ubuntu-18.04 needs: build steps: @@ -88,6 +90,7 @@ jobs: abs-integration-test: name: Run Azure Blob Store Integration Tests + if: ${{ github.actor == 'benbjohnson' }} runs-on: ubuntu-18.04 needs: build steps: @@ -104,6 +107,7 @@ jobs: # sftp-integration-test: # name: Run SFTP Integration Tests +# if: ${{ github.actor == 'benbjohnson' }} # runs-on: ubuntu-18.04 # needs: build # steps: From b8d04957a209d0f9f27b421c3dc6ca52f53c5211 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 22 Jan 2022 11:14:59 -0700 Subject: [PATCH 17/95] Update CI --- integration/cmd_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/integration/cmd_test.go b/integration/cmd_test.go index ab9c1c06..92d143b4 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -217,6 +217,8 @@ func TestCmd_Replicate_ResumeWithNewGeneration(t *testing.T) { func TestCmd_Replicate_HighLoad(t *testing.T) { if testing.Short() { t.Skip("short mode enabled, skipping") + } else if os.Getenv("CI") != "" { + t.Skip("ci, skipping") } const writeDuration = 30 * time.Second From 17831c7025b495d2cc30d832e3733aa4247e6f9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Jan 2022 18:16:09 +0000 Subject: [PATCH 18/95] Bump github.com/pierrec/lz4/v4 from 4.1.3 to 4.1.12 Bumps [github.com/pierrec/lz4/v4](https://github.com/pierrec/lz4) from 4.1.3 to 4.1.12. 
- [Release notes](https://github.com/pierrec/lz4/releases) - [Commits](https://github.com/pierrec/lz4/compare/v4.1.3...v4.1.12) --- updated-dependencies: - dependency-name: github.com/pierrec/lz4/v4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 12 +++++------- go.sum | 32 +++++++++++++++++--------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 1c5a0e81..6f8f6902 100644 --- a/go.mod +++ b/go.mod @@ -4,16 +4,14 @@ go 1.16 require ( cloud.google.com/go/storage v1.15.0 - github.com/Azure/azure-storage-blob-go v0.13.0 // indirect - github.com/Azure/go-autorest/autorest v0.9.0 // indirect + github.com/Azure/azure-storage-blob-go v0.13.0 github.com/aws/aws-sdk-go v1.27.0 - github.com/davecgh/go-spew v1.1.1 - github.com/mattn/go-shellwords v1.0.11 // indirect + github.com/mattn/go-shellwords v1.0.11 github.com/mattn/go-sqlite3 v1.14.5 - github.com/pierrec/lz4/v4 v4.1.3 - github.com/pkg/sftp v1.13.0 // indirect + github.com/pierrec/lz4/v4 v4.1.12 + github.com/pkg/sftp v1.13.0 github.com/prometheus/client_golang v1.9.0 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect + golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750 google.golang.org/api v0.45.0 diff --git a/go.sum b/go.sum index bda8ce98..af71e622 100644 --- a/go.sum +++ b/go.sum @@ -43,17 +43,14 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml 
v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -105,6 +102,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -166,7 +164,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= @@ -185,10 +182,13 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -266,6 +266,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= @@ -303,6 +304,7 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -326,9 +328,8 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= -github.com/pierrec/lz4/v4 v4.1.3 h1:/dvQpkb0o1pVlSgKNQqfkavlnXaIK+hJ0LXsKRUN9D4= -github.com/pierrec/lz4/v4 v4.1.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= +github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -336,6 +337,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.0 h1:Riw6pgOKK41foc1I1Uu03CjvbLZDXeGpInycM4shXoI= github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGIM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -391,6 +393,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/urfave/cli v1.20.0/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -427,7 +430,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= @@ -579,7 +581,6 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -590,6 +591,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750 h1:ZBu6861dZq7xBnG1bn5SRU0vA8nx42at4+kP07FMTog= golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -768,7 +770,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= @@ -777,10 +778,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= 
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -797,6 +798,7 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 0b533e5d7b150d88ac06d36b36869f6cc3516db1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Jan 2022 18:20:45 +0000 Subject: [PATCH 19/95] Bump github.com/aws/aws-sdk-go from 1.27.0 to 1.42.39 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.27.0 to 1.42.39. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.27.0...v1.42.39) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 6f8f6902..1d9f0179 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.16 require ( cloud.google.com/go/storage v1.15.0 github.com/Azure/azure-storage-blob-go v0.13.0 - github.com/aws/aws-sdk-go v1.27.0 + github.com/aws/aws-sdk-go v1.42.39 github.com/mattn/go-shellwords v1.0.11 github.com/mattn/go-sqlite3 v1.14.5 github.com/pierrec/lz4/v4 v4.1.12 @@ -13,7 +13,7 @@ require ( github.com/prometheus/client_golang v1.9.0 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750 + golang.org/x/sys v0.0.0-20210423082822-04245dca01da google.golang.org/api v0.45.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index af71e622..3f3ae62c 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,9 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.42.39 h1:6Lso73VoCI8Zmv3zAMv4BNg2gHAKNOlbLv1s/ew90SI= +github.com/aws/aws-sdk-go v1.42.39/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -243,8 +244,11 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -509,8 +513,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -588,8 +593,9 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750 h1:ZBu6861dZq7xBnG1bn5SRU0vA8nx42at4+kP07FMTog= golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -599,8 +605,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -795,6 +802,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= From 6c5fb2c44643ddd925c9feff42307e1069a94c8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Jan 2022 18:25:31 +0000 Subject: [PATCH 20/95] Bump cloud.google.com/go/storage from 1.15.0 to 1.18.2 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.15.0 to 1.18.2. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.15.0...storage/v1.18.2) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +-- go.sum | 121 +++++++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 103 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 1d9f0179..79564a9f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/benbjohnson/litestream go 1.16 require ( - cloud.google.com/go/storage v1.15.0 + cloud.google.com/go/storage v1.18.2 github.com/Azure/azure-storage-blob-go v0.13.0 github.com/aws/aws-sdk-go v1.42.39 github.com/mattn/go-shellwords v1.0.11 @@ -13,7 +13,7 @@ require ( github.com/prometheus/client_golang v1.9.0 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210423082822-04245dca01da - google.golang.org/api v0.45.0 + golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 + google.golang.org/api v0.58.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 3f3ae62c..0fdd16dc 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,15 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,8 +43,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo 
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.15.0 h1:Ljj+ZXVEhCr/1+4ZhvtteN1ND7UUsNTlduGclLh8GO0= -cloud.google.com/go/storage v1.15.0/go.mod h1:mjjQMoxxyGH7Jr8K5qrx6N2O0AHsczI61sMNn03GIZI= +cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY= +cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= @@ -55,6 +62,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -64,6 +72,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -83,6 +92,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -93,6 +104,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -117,6 +129,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -152,6 +165,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -171,6 +185,7 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -183,14 +198,16 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 
h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -202,14 +219,19 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -218,6 +240,7 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -256,7 +279,6 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -373,6 +395,7 @@ github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULU github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -386,6 +409,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -407,6 +431,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -419,6 +444,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -459,8 +485,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -470,8 +496,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -514,6 +540,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -527,8 +555,12 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 h1:rPRtHfUb0UKZeZ6GH4K4Nt4YRbE9V1u+QZX5upZXqJQ= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -593,9 +625,18 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -663,8 +704,12 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -692,8 +737,16 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -727,6 +780,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -743,9 +797,25 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2 h1:g2sJMUGCpeHZqTx8p3wsAWRS64nFq20i4dvJWcKGqvY= -google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211016002631-37fc39342514 h1:Rp1vYDPD4TdkMH5S/bZbopsGCsWhPcrLBUwOVhAQCxM= +google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -764,13 +834,20 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -782,8 +859,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -800,6 +878,7 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From d045b7bef0d65a6be0e26c4a7df5c79c8bf72043 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Jan 2022 18:29:29 +0000 Subject: [PATCH 21/95] Bump google.golang.org/api from 0.45.0 to 0.65.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.45.0 to 0.65.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.45.0...v0.65.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 5 +++-- go.sum | 35 +++++++++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 79564a9f..58704eac 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/benbjohnson/litestream go 1.16 require ( + cloud.google.com/go/iam v0.1.1 // indirect cloud.google.com/go/storage v1.18.2 github.com/Azure/azure-storage-blob-go v0.13.0 github.com/aws/aws-sdk-go v1.42.39 @@ -13,7 +14,7 @@ require ( github.com/prometheus/client_golang v1.9.0 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 - google.golang.org/api v0.58.0 + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + google.golang.org/api v0.65.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 0fdd16dc..f5155ae7 100644 --- a/go.sum +++ b/go.sum @@ -24,16 +24,23 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0 h1:rSUBvAyVwNJ5uQCKNJFMwPtTvJkfN38b6Pvb9zZoqJ8= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.1.1 h1:4CapQyNFjiksks1/x7jsvsygFPhihslYk5GptIrlX68= +cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -559,8 +566,9 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -635,8 +643,11 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -745,8 +756,12 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= +google.golang.org/api v0.65.0 h1:MTW9c+LIBAbwoS1Gb+YV7NjFBt2f7GtAS5hIzh2NjgQ= +google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -814,8 +829,15 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211016002631-37fc39342514 h1:Rp1vYDPD4TdkMH5S/bZbopsGCsWhPcrLBUwOVhAQCxM= google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 h1:g/x+MYjJYDEP3OBCYYmwIbt4x6k3gryb+ohyOR7PXfI= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -845,8 +867,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1 h1:pnP7OclFFFgFi4VHQDQDaoXUVauOFyktqTsqqgzFKbc= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 79b50c6944ccb07bcbaf4bcc8b854e1d93ad5531 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Sun, 23 Jan 2022 10:46:10 +1100 Subject: [PATCH 22/95] Update sqlite 3.36 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 58704eac..002e358c 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-storage-blob-go v0.13.0 github.com/aws/aws-sdk-go v1.42.39 github.com/mattn/go-shellwords v1.0.11 - github.com/mattn/go-sqlite3 v1.14.5 + github.com/mattn/go-sqlite3 v1.14.10 github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/sftp v1.13.0 github.com/prometheus/client_golang v1.9.0 diff --git a/go.sum b/go.sum index f5155ae7..d29c18c1 100644 --- a/go.sum +++ b/go.sum @@ -312,8 +312,8 @@ github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ= -github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= +github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= From 90715ef8f303625fb092d25b9e818f00d1a9d9df Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 23 Jan 2022 09:04:35 -0700 Subject: [PATCH 23/95] Upgrade azure-storage-blob-go to v0.14.0 --- go.mod | 2 +- go.sum | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 002e358c..c545d8e5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.16 require ( cloud.google.com/go/iam v0.1.1 // indirect cloud.google.com/go/storage v1.18.2 - github.com/Azure/azure-storage-blob-go v0.13.0 + github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.39 github.com/mattn/go-shellwords v1.0.11 github.com/mattn/go-sqlite3 v1.14.10 diff --git a/go.sum b/go.sum index d29c18c1..f7ee6521 100644 --- a/go.sum +++ b/go.sum @@ -55,15 +55,17 @@ cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMju dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= -github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4= -github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 
h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -122,7 +124,6 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -139,6 +140,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -231,9 +234,9 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -298,6 +301,8 @@ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -337,8 +342,6 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -467,6 +470,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= @@ -889,8 +893,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= From 500cfd8bf47cde5b55fca05996da21cdec795fa2 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 23 Jan 2022 09:11:03 -0700 Subject: [PATCH 24/95] Upgrade shellwords, golang.org/x --- go.mod | 6 +++--- go.sum | 13 ++++++++----- 2 files changed, 11 
insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index c545d8e5..a22930dd 100644 --- a/go.mod +++ b/go.mod @@ -7,14 +7,14 @@ require ( cloud.google.com/go/storage v1.18.2 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.39 - github.com/mattn/go-shellwords v1.0.11 + github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.10 github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/sftp v1.13.0 github.com/prometheus/client_golang v1.9.0 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a + golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 google.golang.org/api v0.65.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index f7ee6521..bea40490 100644 --- a/go.sum +++ b/go.sum @@ -315,8 +315,8 @@ github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= -github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -472,8 +472,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -553,6 +553,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -642,6 +643,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -650,8 +652,9 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= From 55c475e3fe617bd53a01df3c665a4825edf8f447 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 23 Jan 2022 09:20:03 -0700 Subject: [PATCH 25/95] Upgrade github.com/pkg/sftp@v1.13.4 --- go.mod | 2 +- go.sum | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index a22930dd..51902de9 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.10 github.com/pierrec/lz4/v4 v4.1.12 - github.com/pkg/sftp v1.13.0 + github.com/pkg/sftp v1.13.4 github.com/prometheus/client_golang v1.9.0 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/go.sum b/go.sum index bea40490..a35ba23c 100644 --- a/go.sum +++ b/go.sum @@ -371,8 +371,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod 
h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.0 h1:Riw6pgOKK41foc1I1Uu03CjvbLZDXeGpInycM4shXoI= -github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGIM= +github.com/pkg/sftp v1.13.4 h1:Lb0RYJCmgUcBgZosfoi9Y9sbl6+LJgOIgk/2Y4YjMFg= +github.com/pkg/sftp v1.13.4/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -431,8 +431,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -471,7 +472,7 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -605,7 +606,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -640,6 +640,7 @@ golang.org/x/sys 
v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -655,7 +656,6 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 5d24f91ea70bdd9f51e45b059d67bee7dc183f60 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 23 Jan 2022 09:25:29 -0700 Subject: [PATCH 26/95] Upgrade github.com/prometheus/client_golang@v1.12.0 --- go.mod | 2 +- go.sum | 231 +++++---------------------------------------------------- 2 files changed, 19 insertions(+), 214 deletions(-) diff --git a/go.mod b/go.mod index 51902de9..0d88e1f2 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.10 github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/sftp v1.13.4 - github.com/prometheus/client_golang v1.9.0 + github.com/prometheus/client_golang v1.12.0 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 diff --git a/go.sum b/go.sum index a35ba23c..dfacbf71 100644 --- a/go.sum +++ b/go.sum @@ -70,67 +70,36 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod 
h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.42.39 h1:6Lso73VoCI8Zmv3zAMv4BNg2gHAKNOlbLv1s/ew90SI= github.com/aws/aws-sdk-go v1.42.39/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -139,30 +108,21 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq 
v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -194,7 +154,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -233,7 +192,6 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -242,58 +200,24 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -306,125 +230,59 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.4 h1:Lb0RYJCmgUcBgZosfoi9Y9sbl6+LJgOIgk/2Y4YjMFg= github.com/pkg/sftp v1.13.4/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang 
v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -434,19 +292,11 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io 
v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -456,19 +306,10 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -512,13 +353,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -529,7 +365,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -554,6 +389,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -587,14 +423,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -604,11 +435,9 @@ golang.org/x/sys 
v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -631,9 +460,9 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -643,6 +472,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -667,19 +497,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -689,8 +515,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -698,7 +522,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -733,7 +556,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -770,7 +592,6 @@ google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSim google.golang.org/api v0.65.0 h1:MTW9c+LIBAbwoS1Gb+YV7NjFBt2f7GtAS5hIzh2NjgQ= google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -783,7 +604,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -845,15 +665,10 @@ google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 h1:g/x+MYjJYDEP3OBCYYmwIbt4x6k3gryb+ohyOR7PXfI= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -898,14 +713,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -917,7 +725,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -928,5 +735,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= From 39114502f3959cf6ddf068148c4ff92e450dd831 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 23 Jan 2022 09:30:17 -0700 Subject: [PATCH 27/95] Create codeql-analysis.yml --- .github/workflows/codeql-analysis.yml | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 .github/workflows/codeql-analysis.yml diff --git a/.github/workflows/codeql-analysis.yml 
b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..d06b47f6 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,38 @@ +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + schedule: + - cron: '20 16 * * 4' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 From 8950de8f7e9d52582e886e509e4d9079698a00ad Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 24 Jan 2022 13:09:32 -0700 Subject: [PATCH 28/95] Update dependabot.yml --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index eed53854..4ff45e18 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,4 +5,4 @@ updates: assignees: - "benbjohnson" schedule: - interval: "daily" + interval: "weekly" From 8d759bb0b8b1c9210343e5ae6679c382884266aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jan 2022 19:43:13 +0000 Subject: [PATCH 29/95] Bump github.com/aws/aws-sdk-go from 1.42.39 to 1.42.40 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.39 to 1.42.40. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.39...v1.42.40) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0d88e1f2..286a1d01 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/iam v0.1.1 // indirect cloud.google.com/go/storage v1.18.2 github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/aws/aws-sdk-go v1.42.39 + github.com/aws/aws-sdk-go v1.42.40 github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.10 github.com/pierrec/lz4/v4 v4.1.12 diff --git a/go.sum b/go.sum index dfacbf71..ef3f492d 100644 --- a/go.sum +++ b/go.sum @@ -77,8 +77,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go v1.42.39 h1:6Lso73VoCI8Zmv3zAMv4BNg2gHAKNOlbLv1s/ew90SI= -github.com/aws/aws-sdk-go v1.42.39/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/aws/aws-sdk-go v1.42.40 h1:oZ+hyhorrkYdT23YO8s0eWBp9Fg8k4HsAFL3n0V25WA= +github.com/aws/aws-sdk-go v1.42.40/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From ffaba87b406f31fcf30e72b831f013ad353b391d Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 25 Jan 2022 16:06:47 -0700 Subject: [PATCH 30/95] Separate out GitHub Actions --- .github/workflows/abs_integration_test.yml | 25 ++++ .github/workflows/build_and_test.yml | 25 ++++ .github/workflows/commit.yml | 127 ------------------ .github/workflows/gcp_integration_test.yml | 30 +++++ .github/workflows/long_running_test.yml | 23 ++++ .github/workflows/s3_integration_test.yml | 28 ++++ .../workflows/sftp_integration_test.yml.bak | 34 +++++ 7 files changed, 165 insertions(+), 127 deletions(-) create mode 100644 .github/workflows/abs_integration_test.yml create mode 100644 .github/workflows/build_and_test.yml delete mode 100644 .github/workflows/commit.yml create mode 100644 .github/workflows/gcp_integration_test.yml create mode 100644 .github/workflows/long_running_test.yml create mode 100644 .github/workflows/s3_integration_test.yml create mode 100644 .github/workflows/sftp_integration_test.yml.bak diff --git a/.github/workflows/abs_integration_test.yml b/.github/workflows/abs_integration_test.yml new file mode 100644 index 00000000..991ecec4 --- /dev/null +++ b/.github/workflows/abs_integration_test.yml @@ -0,0 +1,25 @@ +name: Azure Blob Store Integration Tests +on: pull_request + +jobs: + abs-integration-test: + name: Run Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go test -v -run=TestReplicaClient ./integration -replica-type abs + env: + LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} + LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} + 
LITESTREAM_ABS_BUCKET: integration diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml new file mode 100644 index 00000000..34565a7f --- /dev/null +++ b/.github/workflows/build_and_test.yml @@ -0,0 +1,25 @@ +name: "Build and Unit Test" +on: pull_request + +jobs: + build: + name: Build + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Build binary + run: go install ./cmd/litestream + + - name: Run unit tests + run: make testdata && go test -v ./... diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml deleted file mode 100644 index e58491a5..00000000 --- a/.github/workflows/commit.yml +++ /dev/null @@ -1,127 +0,0 @@ -on: push - -jobs: - build: - name: Build & Unit Test - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - run: go env - - - run: go install ./cmd/litestream - - - run: make testdata - - run: go test -v ./... - - - name: Build integration test - run: go test -c ./integration - - - uses: actions/upload-artifact@v2 - with: - name: integration.test - path: integration.test - if-no-files-found: error - - long-running-test: - name: Run Long Running Unit Test - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - run: go install ./cmd/litestream - - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m - - s3-integration-test: - name: Run S3 Integration Tests - if: ${{ github.actor == 'benbjohnson' }} - runs-on: ubuntu-18.04 - needs: build - steps: - - uses: actions/download-artifact@v2 - with: - name: integration.test - - run: chmod +x integration.test - - - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type s3 - env: - LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} - LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: us-east-1 - LITESTREAM_S3_BUCKET: integration.litestream.io - - gcp-integration-test: - name: Run GCP Integration Tests - if: ${{ github.actor == 'benbjohnson' }} - runs-on: ubuntu-18.04 - needs: build - steps: - - name: Extract GCP credentials - run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' - shell: bash - env: - GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - - uses: actions/download-artifact@v2 - with: - name: integration.test - - run: chmod +x integration.test - - - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type gcs - env: - GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: integration.litestream.io - - abs-integration-test: - name: Run Azure Blob Store Integration Tests - if: ${{ github.actor == 'benbjohnson' }} - runs-on: ubuntu-18.04 - needs: build - steps: - - uses: actions/download-artifact@v2 - with: - name: integration.test - - run: chmod +x integration.test - - - run: ./integration.test -test.v 
-test.run=TestReplicaClient -replica-type abs - env: - LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} - LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: integration - -# sftp-integration-test: -# name: Run SFTP Integration Tests -# if: ${{ github.actor == 'benbjohnson' }} -# runs-on: ubuntu-18.04 -# needs: build -# steps: -# - name: Extract SSH key -# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' -# shell: bash -# env: -# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} -# -# - name: Run sftp tests -# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp -# env: -# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} -# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} -# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 -# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} - diff --git a/.github/workflows/gcp_integration_test.yml b/.github/workflows/gcp_integration_test.yml new file mode 100644 index 00000000..1cab2f87 --- /dev/null +++ b/.github/workflows/gcp_integration_test.yml @@ -0,0 +1,30 @@ +name: GCP Integration Tests +on: pull_request + +jobs: + gcp-integration-test: + name: Run GCP Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Extract GCP credentials + run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' + shell: bash + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} + + - run: go test -v -run=TestReplicaClient ./integration -replica-type gcs + env: + GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json + LITESTREAM_GCS_BUCKET: integration.litestream.io diff --git a/.github/workflows/long_running_test.yml b/.github/workflows/long_running_test.yml new file mode 100644 index 00000000..3827b1af --- /dev/null +++ b/.github/workflows/long_running_test.yml @@ -0,0 +1,23 @@ +name: Long-Running Unit Test +on: pull_request + +jobs: + test: + name: Run Long Running Unit Test + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m diff --git a/.github/workflows/s3_integration_test.yml b/.github/workflows/s3_integration_test.yml new file mode 100644 index 00000000..6fac1b3d --- /dev/null +++ b/.github/workflows/s3_integration_test.yml @@ -0,0 +1,28 @@ +name: S3 Integration Tests +on: pull_request + +jobs: + test: + name: Run S3 Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestReplicaClient ./integration -replica-type s3 + env: + LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} + LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} + LITESTREAM_S3_REGION: us-east-1 + 
LITESTREAM_S3_BUCKET: integration.litestream.io diff --git a/.github/workflows/sftp_integration_test.yml.bak b/.github/workflows/sftp_integration_test.yml.bak new file mode 100644 index 00000000..3c8bc895 --- /dev/null +++ b/.github/workflows/sftp_integration_test.yml.bak @@ -0,0 +1,34 @@ +#name: SFTP Integration Tests +#on: pull_request +# +#jobs: +# sftp-integration-test: +# name: Run SFTP Integration Tests +# runs-on: ubuntu-18.04 +# steps: +# - uses: actions/checkout@v2 +# +# - uses: actions/setup-go@v2 +# with: +# go-version: '1.17' +# +# - uses: actions/cache@v2 +# with: +# path: ~/go/pkg/mod +# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} +# restore-keys: ${{ inputs.os }}-go- +# +# - name: Extract SSH key +# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' +# shell: bash +# env: +# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} +# +# - name: Run sftp tests +# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp +# env: +# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} +# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} +# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 +# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + From 1741c82839119f88c3b418e250475ff30dd37995 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 25 Jan 2022 18:10:13 -0700 Subject: [PATCH 31/95] Produce build for every pull request --- .github/workflows/abs_integration_test.yml | 25 ---- .github/workflows/gcp_integration_test.yml | 30 ----- .github/workflows/integration_test.yml | 127 ++++++++++++++++++ .github/workflows/long_running_test.yml | 23 ---- .github/workflows/release.linux.yml | 84 +++++++++--- .github/workflows/release.linux_static.yml | 62 --------- .github/workflows/s3_integration_test.yml | 28 ---- .../workflows/sftp_integration_test.yml.bak | 34 ----- 8 files changed, 191 insertions(+), 222 deletions(-) delete mode 100644 .github/workflows/abs_integration_test.yml delete mode 100644 .github/workflows/gcp_integration_test.yml create mode 100644 .github/workflows/integration_test.yml delete mode 100644 .github/workflows/long_running_test.yml delete mode 100644 .github/workflows/release.linux_static.yml delete mode 100644 .github/workflows/s3_integration_test.yml delete mode 100644 .github/workflows/sftp_integration_test.yml.bak diff --git a/.github/workflows/abs_integration_test.yml b/.github/workflows/abs_integration_test.yml deleted file mode 100644 index 991ecec4..00000000 --- a/.github/workflows/abs_integration_test.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Azure Blob Store Integration Tests -on: pull_request - -jobs: - abs-integration-test: - name: Run Integration Tests - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - run: go test -v -run=TestReplicaClient ./integration -replica-type abs - env: - LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} - LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: integration diff --git a/.github/workflows/gcp_integration_test.yml b/.github/workflows/gcp_integration_test.yml deleted file mode 100644 index 1cab2f87..00000000 --- a/.github/workflows/gcp_integration_test.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: GCP Integration Tests -on: pull_request - -jobs: - gcp-integration-test: - name: Run GCP 
Integration Tests - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - name: Extract GCP credentials - run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' - shell: bash - env: - GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - - run: go test -v -run=TestReplicaClient ./integration -replica-type gcs - env: - GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: integration.litestream.io diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml new file mode 100644 index 00000000..1ac40fb5 --- /dev/null +++ b/.github/workflows/integration_test.yml @@ -0,0 +1,127 @@ +name: Integration Tests +on: pull_request + +jobs: + s3-integration-test: + name: Run S3 Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestReplicaClient ./integration -replica-type s3 + env: + LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} + LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} + LITESTREAM_S3_REGION: us-east-1 + LITESTREAM_S3_BUCKET: integration.litestream.io + + gcp-integration-test: + name: Run GCP Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Extract GCP credentials + run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' + shell: bash + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} + + - run: go test -v -run=TestReplicaClient ./integration -replica-type gcs + env: + GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json + LITESTREAM_GCS_BUCKET: integration.litestream.io + + abs-integration-test: + name: Run Azure Blob Store Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go test -v -run=TestReplicaClient ./integration -replica-type abs + env: + LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} + LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} + LITESTREAM_ABS_BUCKET: integration + +# sftp-integration-test: +# name: Run SFTP Integration Tests +# runs-on: ubuntu-18.04 +# steps: +# - uses: actions/checkout@v2 +# +# - uses: actions/setup-go@v2 +# with: +# go-version: '1.17' +# +# - uses: actions/cache@v2 +# with: +# path: ~/go/pkg/mod +# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} +# restore-keys: ${{ inputs.os }}-go- +# +# - name: Extract SSH key +# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' +# shell: bash +# env: +# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} +# +# - name: Run sftp tests +# run: go test -v 
-run=TestReplicaClient ./integration -replica-type sftp +# env: +# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} +# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} +# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 +# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + + long-running-test: + name: Run Long-Running Test + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m diff --git a/.github/workflows/long_running_test.yml b/.github/workflows/long_running_test.yml deleted file mode 100644 index 3827b1af..00000000 --- a/.github/workflows/long_running_test.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Long-Running Unit Test -on: pull_request - -jobs: - test: - name: Run Long Running Unit Test - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - run: go install ./cmd/litestream - - - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index ce80a956..89509130 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -2,41 +2,68 @@ on: release: types: - created + pull_request: + types: + - opened + - synchronize + - reopened -name: release (linux) +name: Release (Linux) jobs: build: runs-on: ubuntu-18.04 strategy: matrix: include: - - arch: amd64 - cc: gcc + - arch: amd64 + cc: gcc + + - arch: amd64 + cc: gcc + static: true + - arch: arm64 cc: aarch64-linux-gnu-gcc + + - arch: arm64 + cc: aarch64-linux-gnu-gcc + static: true + - arch: arm arm: 6 cc: arm-linux-gnueabi-gcc + + - arch: arm + arm: 6 + cc: arm-linux-gnueabi-gcc + static: true + + - arch: arm + arm: 7 + cc: arm-linux-gnueabihf-gcc + - arch: arm arm: 7 cc: arm-linux-gnueabihf-gcc + static: true env: GOOS: linux GOARCH: ${{ matrix.arch }} GOARM: ${{ matrix.arm }} CC: ${{ matrix.cc }} + LDFLAGS: ${{ matrix.static && '-extldflags "-static"' || '' }} + TAGS: ${{ matrix.static && 'osusergo,netgo,sqlite_omit_load_extension' || '' }} + SUFFIX: "${{ matrix.static && '-static' || ''}}" + VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" + steps: - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 with: - go-version: '1.16' - - - id: release - uses: bruceadams/get-release@v1.2.2 - env: - GITHUB_TOKEN: ${{ github.token }} + go-version: '1.17' - name: Install cross-compilers run: | @@ -50,32 +77,49 @@ jobs: - name: Build litestream run: | - rm -rf dist - mkdir -p dist + rm -rf dist && mkdir -p dist + cp etc/litestream.yml etc/litestream.service dist - cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml - CGO_ENABLED=1 go build -ldflags "-s -w -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream - + cat etc/nfpm.yml | LITESTREAM_VERSION=${{ env.VERSION }} envsubst > dist/nfpm.yml + + CGO_ENABLED=1 go build -ldflags "-s -w ${{ env.LDFLAGS }} -X 
'main.Version=${{ env.VERSION }}'" -tags "${{ env.TAGS }}" -o dist/litestream ./cmd/litestream + cd dist - tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream - ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb + tar -czvf litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz litestream + ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + + - name: Upload binary artifact + uses: actions/upload-artifact@v2 + with: + name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + path: dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + if-no-files-found: error + + - name: Upload debian artifact + uses: actions/upload-artifact@v2 + with: + name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + path: dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + if-no-files-found: error - name: Upload release tarball uses: actions/upload-release-asset@v1.0.2 + if: github.event_name == 'release' env: GITHUB_TOKEN: ${{ github.token }} with: upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz + asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz asset_content_type: application/gzip - name: Upload debian package uses: actions/upload-release-asset@v1.0.2 + if: github.event_name == 'release' env: GITHUB_TOKEN: ${{ github.token }} with: upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb + asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_content_type: application/octet-stream diff --git a/.github/workflows/release.linux_static.yml b/.github/workflows/release.linux_static.yml deleted file mode 100644 index ddc90b9a..00000000 --- a/.github/workflows/release.linux_static.yml +++ /dev/null @@ -1,62 +0,0 @@ -on: - release: - types: - - created - -name: release (linux/static) -jobs: - build: - runs-on: ubuntu-18.04 - strategy: - matrix: - include: - - arch: amd64 - cc: gcc - - arch: arm64 - cc: aarch64-linux-gnu-gcc - - arch: arm - arm: 6 - cc: arm-linux-gnueabi-gcc - - arch: arm - arm: 7 - cc: arm-linux-gnueabihf-gcc - - env: - GOOS: linux - GOARCH: ${{ matrix.arch }} - GOARM: ${{ matrix.arm }} - CC: ${{ matrix.cc }} - - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: '1.16' - - 
- id: release - uses: bruceadams/get-release@v1.2.2 - env: - GITHUB_TOKEN: ${{ github.token }} - - - name: Install cross-compilers - run: | - sudo apt-get update - sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi - - - name: Build litestream - run: | - rm -rf dist - mkdir -p dist - CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream - cd dist - tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz litestream - - - name: Upload release tarball - uses: actions/upload-release-asset@v1.0.2 - env: - GITHUB_TOKEN: ${{ github.token }} - with: - upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz - asset_content_type: application/gzip diff --git a/.github/workflows/s3_integration_test.yml b/.github/workflows/s3_integration_test.yml deleted file mode 100644 index 6fac1b3d..00000000 --- a/.github/workflows/s3_integration_test.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: S3 Integration Tests -on: pull_request - -jobs: - test: - name: Run S3 Integration Tests - runs-on: ubuntu-18.04 - steps: - - uses: actions/checkout@v2 - - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ inputs.os }}-go- - - - run: go install ./cmd/litestream - - - run: go test -v -run=TestReplicaClient ./integration -replica-type s3 - env: - LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} - LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: us-east-1 - LITESTREAM_S3_BUCKET: integration.litestream.io diff --git a/.github/workflows/sftp_integration_test.yml.bak b/.github/workflows/sftp_integration_test.yml.bak deleted file mode 100644 index 3c8bc895..00000000 --- a/.github/workflows/sftp_integration_test.yml.bak +++ /dev/null @@ -1,34 +0,0 @@ -#name: SFTP Integration Tests -#on: pull_request -# -#jobs: -# sftp-integration-test: -# name: Run SFTP Integration Tests -# runs-on: ubuntu-18.04 -# steps: -# - uses: actions/checkout@v2 -# -# - uses: actions/setup-go@v2 -# with: -# go-version: '1.17' -# -# - uses: actions/cache@v2 -# with: -# path: ~/go/pkg/mod -# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} -# restore-keys: ${{ inputs.os }}-go- -# -# - name: Extract SSH key -# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' -# shell: bash -# env: -# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} -# -# - name: Run sftp tests -# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp -# env: -# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} -# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} -# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 -# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} - From dbdde21341236fc3de4affec3a27259bf5ced060 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 28 Jan 2022 15:05:21 -0700 Subject: [PATCH 32/95] Use sqlite3_file_control(SQLITE_FCNTL_PERSIST_WAL) to persist WAL Previously, Litestream would avoid closing the 
SQLite3 connection in order to ensure that the WAL file was not cleaned up by the database if it was the last connection. This commit changes the behavior by introducing a file control call to perform the same action. This allows us to close the database file normally in all cases. --- cmd/litestream/replicate.go | 2 +- db.go | 28 ++++++++-------------------- db_test.go | 4 ++-- go.mod | 2 +- go.sum | 2 ++ litestream.go | 13 +++++++++++++ 6 files changed, 27 insertions(+), 24 deletions(-) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 7f9a9aa4..fa849073 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -181,7 +181,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { // Close closes all open databases. func (c *ReplicateCommand) Close() (err error) { for _, db := range c.DBs { - if e := db.SoftClose(); e != nil { + if e := db.Close(); e != nil { log.Printf("error closing db: path=%s err=%s", db.Path(), e) if err == nil { err = e diff --git a/db.go b/db.go index ed5a97f0..49fbf219 100644 --- a/db.go +++ b/db.go @@ -405,20 +405,9 @@ func (db *DB) Open() (err error) { return nil } -// Close releases the read lock & closes the database. This method should only -// be called by tests as it causes the underlying database to be checkpointed. +// Close flushes outstanding WAL writes to replicas, releases the read lock, +// and closes the database. func (db *DB) Close() (err error) { - return db.close(false) -} - -// SoftClose closes everything but the underlying db connection. This method -// is available because the binary needs to avoid closing the database on exit -// to prevent autocheckpointing. -func (db *DB) SoftClose() (err error) { - return db.close(true) -} - -func (db *DB) close(soft bool) (err error) { db.cancel() db.wg.Wait() @@ -439,7 +428,7 @@ func (db *DB) close(soft bool) (err error) { err = e } } - r.Stop(!soft) + r.Stop(true) } // Release the read lock to allow other applications to handle checkpointing. @@ -449,9 +438,7 @@ func (db *DB) close(soft bool) (err error) { } } - // Only perform full close if this is not a soft close. - // This closes the underlying database connection which can clean up the WAL. - if !soft && db.db != nil { + if db.db != nil { if e := db.db.Close(); e != nil && err == nil { err = e } @@ -507,8 +494,9 @@ func (db *DB) init() (err error) { dsn := db.path dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) - // Connect to SQLite database. - if db.db, err = sql.Open("sqlite3", dsn); err != nil { + // Connect to SQLite database. Use the driver registered with a hook to + // prevent WAL files from being removed. + if db.db, err = sql.Open("litestream-sqlite3", dsn); err != nil { return err } @@ -1536,7 +1524,7 @@ func ApplyWAL(ctx context.Context, dbPath, walPath string) error { } // Open SQLite database and force a truncating checkpoint. - d, err := sql.Open("sqlite3", dbPath) + d, err := sql.Open("litestream-sqlite3", dbPath) if err != nil { return err } diff --git a/db_test.go b/db_test.go index fe742251..a9dbb585 100644 --- a/db_test.go +++ b/db_test.go @@ -254,8 +254,8 @@ func TestDB_Sync(t *testing.T) { t.Fatal(err) } - // Verify WAL does not exist. - if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) { + // Remove WAL file. 
+ if err := os.Remove(db.WALPath()); err != nil { t.Fatal(err) } diff --git a/go.mod b/go.mod index 286a1d01..169ea193 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.40 github.com/mattn/go-shellwords v1.0.12 - github.com/mattn/go-sqlite3 v1.14.10 + github.com/mattn/go-sqlite3 v1.14.11 github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/sftp v1.13.4 github.com/prometheus/client_golang v1.12.0 diff --git a/go.sum b/go.sum index ef3f492d..7c575947 100644 --- a/go.sum +++ b/go.sum @@ -236,6 +236,8 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ= +github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/litestream.go b/litestream.go index 98f94a8b..5381e057 100644 --- a/litestream.go +++ b/litestream.go @@ -13,6 +13,8 @@ import ( "strconv" "strings" "time" + + "github.com/mattn/go-sqlite3" ) // Naming constants. @@ -51,6 +53,17 @@ var ( LogFlags = 0 ) +func init() { + sql.Register("litestream-sqlite3", &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + if err := conn.SetFileControlInt("main", sqlite3.SQLITE_FCNTL_PERSIST_WAL, 1); err != nil { + return fmt.Errorf("cannot set file control: %w", err) + } + return nil + }, + }) +} + // SnapshotIterator represents an iterator over a collection of snapshot metadata. 
type SnapshotIterator interface { io.Closer From f8382cfa15ee7e6f7e80d8b73e3ddd58a36f51d6 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 28 Jan 2022 15:16:35 -0700 Subject: [PATCH 33/95] Dispatch test runner in CI --- .github/workflows/release.linux.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index 89509130..5133bfaf 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -21,6 +21,7 @@ jobs: - arch: amd64 cc: gcc static: true + deploy_test_runner: true - arch: arm64 cc: aarch64-linux-gnu-gcc @@ -123,3 +124,10 @@ jobs: asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_content_type: application/octet-stream + + - name: Dispatch test runner + if: matrix.deploy_test_runner + run: sleep 60 && gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.run_id }} -f litestream_version=${{ github.sha }} + env: + GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} + From 26f219da1d470ec226c1985ee382e6992807d54e Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 30 Jan 2022 08:51:55 -0700 Subject: [PATCH 34/95] Add test runner request action --- .github/workflows/request_test_runner.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/workflows/request_test_runner.yml diff --git a/.github/workflows/request_test_runner.yml b/.github/workflows/request_test_runner.yml new file mode 100644 index 00000000..a00cef03 --- /dev/null +++ b/.github/workflows/request_test_runner.yml @@ -0,0 +1,23 @@ +name: Request Test Runner +on: + workflow_dispatch: + inputs: + run_id: + required: true + type: string + litestream_version: + required: true + type: string + +jobs: + build: + runs-on: ubuntu-18.04 + environment: + name: test + url: http://litestream-test-runner-${{ github.sha }}.fly.dev + steps: + - name: Dispatch test runner + run: gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.event.inputs.run_id }} -f litestream_version=${{ github.event.inputs.litestream_version }} + env: + GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} + From 906ed9b3ca311333c1f2697cbdee2e79b3bfeb54 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 30 Jan 2022 08:57:46 -0700 Subject: [PATCH 35/95] Revert "Add test runner request action" This reverts commit 26f219da1d470ec226c1985ee382e6992807d54e. 
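Note (editorial, not part of any patch in this series): the `gh workflow run deploy.yml -R ... -f run_id=... -f litestream_version=...` dispatch step added in the release workflow above is a thin wrapper around GitHub's workflow_dispatch REST endpoint. The sketch below illustrates the equivalent call from Go; the branch ref, token handling, and hard-coded input values are assumptions for illustration only.

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
        "os"
    )

    // dispatchWorkflow triggers a workflow_dispatch event, roughly what
    // `gh workflow run deploy.yml -R owner/repo -f key=value` does.
    func dispatchWorkflow(owner, repo, workflow, ref string, inputs map[string]string) error {
        body, err := json.Marshal(map[string]interface{}{"ref": ref, "inputs": inputs})
        if err != nil {
            return err
        }

        url := fmt.Sprintf("https://api.github.com/repos/%s/%s/actions/workflows/%s/dispatches",
            owner, repo, workflow)
        req, err := http.NewRequest("POST", url, bytes.NewReader(body))
        if err != nil {
            return err
        }
        req.Header.Set("Accept", "application/vnd.github+json")
        // Token source is an assumption; any PAT with workflow scope works.
        req.Header.Set("Authorization", "token "+os.Getenv("GITHUB_TOKEN"))

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()

        // The dispatches endpoint returns 204 No Content on success.
        if resp.StatusCode != http.StatusNoContent {
            return fmt.Errorf("dispatch failed: %s", resp.Status)
        }
        return nil
    }

    func main() {
        // run_id and litestream_version mirror the workflow inputs above;
        // the values here are placeholders.
        err := dispatchWorkflow("benbjohnson", "litestream-test-runner", "deploy.yml", "main",
            map[string]string{"run_id": "12345", "litestream_version": "abcdef0"})
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }
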
--- .github/workflows/request_test_runner.yml | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 .github/workflows/request_test_runner.yml diff --git a/.github/workflows/request_test_runner.yml b/.github/workflows/request_test_runner.yml deleted file mode 100644 index a00cef03..00000000 --- a/.github/workflows/request_test_runner.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Request Test Runner -on: - workflow_dispatch: - inputs: - run_id: - required: true - type: string - litestream_version: - required: true - type: string - -jobs: - build: - runs-on: ubuntu-18.04 - environment: - name: test - url: http://litestream-test-runner-${{ github.sha }}.fly.dev - steps: - - name: Dispatch test runner - run: gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.event.inputs.run_id }} -f litestream_version=${{ github.event.inputs.litestream_version }} - env: - GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} - From 0dfa5f98d10219e6475c0dfacd0666d1e5382562 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 30 Jan 2022 09:05:57 -0700 Subject: [PATCH 36/95] Re-enable SFTP integration tests --- .github/workflows/integration_test.yml | 66 +++++++++++++++----------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index 1ac40fb5..b947afbf 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -76,35 +76,43 @@ jobs: LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} LITESTREAM_ABS_BUCKET: integration -# sftp-integration-test: -# name: Run SFTP Integration Tests -# runs-on: ubuntu-18.04 -# steps: -# - uses: actions/checkout@v2 -# -# - uses: actions/setup-go@v2 -# with: -# go-version: '1.17' -# -# - uses: actions/cache@v2 -# with: -# path: ~/go/pkg/mod -# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} -# restore-keys: ${{ inputs.os }}-go- -# -# - name: Extract SSH key -# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' -# shell: bash -# env: -# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} -# -# - name: Run sftp tests -# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp -# env: -# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} -# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} -# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 -# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} + sftp-integration-test: + name: Run SFTP Integration Tests + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Extract SSH key + run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' + shell: bash + env: + LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} + + - name: Run sftp tests w/ key + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp + env: + LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 + LITESTREAM_SFTP_USER: litestream + LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 + LITESTREAM_SFTP_PATH: /litestream + + - name: Run sftp tests w/ password + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp + env: + LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 + LITESTREAM_SFTP_USER: litestream + LITESTREAM_SFTP_PASSWORD: ${{ secrets.LITESTREAM_SFTP_PASSWORD }} + 
LITESTREAM_SFTP_PATH: /litestream long-running-test: name: Run Long-Running Test From f6c859061bfd7ccc2a21fcde3e9f0eb9ad98cd5e Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 30 Jan 2022 10:17:36 -0700 Subject: [PATCH 37/95] Fix CodeQL warnings --- .github/workflows/integration_test.yml | 8 ++++---- db.go | 8 ++++++-- integration/replica_client_test.go | 12 +++++++----- internal/internal.go | 20 +++++++++++++++----- s3/replica_client.go | 8 ++++---- sftp/replica_client.go | 21 ++++++++++++++++++--- 6 files changed, 54 insertions(+), 23 deletions(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index b947afbf..afeccca4 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -101,10 +101,10 @@ jobs: - name: Run sftp tests w/ key run: go test -v -run=TestReplicaClient ./integration -replica-type sftp env: - LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 - LITESTREAM_SFTP_USER: litestream - LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 - LITESTREAM_SFTP_PATH: /litestream + LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 + LITESTREAM_SFTP_USER: litestream + LITESTREAM_SFTP_PATH: /litestream + LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 - name: Run sftp tests w/ password run: go test -v -run=TestReplicaClient ./integration -replica-type sftp diff --git a/db.go b/db.go index 49fbf219..85d146dc 100644 --- a/db.go +++ b/db.go @@ -12,6 +12,7 @@ import ( "io" "io/ioutil" "log" + "math" "math/rand" "os" "path/filepath" @@ -1593,8 +1594,11 @@ func parseWALPath(s string) (index int, err error) { return 0, fmt.Errorf("invalid wal path: %s", s) } - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil + i32, _ := strconv.ParseUint(a[1], 16, 32) + if i32 > math.MaxInt32 { + return 0, fmt.Errorf("index too large in wal path: %s", s) + } + return int(i32), nil } // formatWALPath formats a WAL filename with a given index. 
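Note (editorial, not part of this patch): the CodeQL fix above narrows the hex parse to 32 bits and rejects values above math.MaxInt32 before converting to int. A minimal standalone sketch of that pattern follows; parseHexIndex is a hypothetical helper, not a function from this patch.

    package main

    import (
        "fmt"
        "math"
        "strconv"
    )

    // parseHexIndex parses an 8-digit hex index (e.g. "000001a3") and refuses
    // values that would overflow a 32-bit signed int, mirroring the check
    // added to parseWALPath above.
    func parseHexIndex(s string) (int, error) {
        v, err := strconv.ParseUint(s, 16, 32) // errors if wider than 32 bits
        if err != nil {
            return 0, fmt.Errorf("invalid index %q: %w", s, err)
        }
        if v > math.MaxInt32 { // uint32 values above MaxInt32 still slip through ParseUint
            return 0, fmt.Errorf("index %q out of range", s)
        }
        return int(v), nil
    }

    func main() {
        fmt.Println(parseHexIndex("000001a3")) // 419 <nil>
        fmt.Println(parseHexIndex("ffffffff")) // 0 index "ffffffff" out of range
    }
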
diff --git a/integration/replica_client_test.go b/integration/replica_client_test.go index 109f4f39..8761965e 100644 --- a/integration/replica_client_test.go +++ b/integration/replica_client_test.go @@ -59,11 +59,12 @@ var ( // SFTP settings var ( - sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") - sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") - sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") - sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") - sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") + sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") + sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") + sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") + sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") + sftpHostKeyPath = flag.String("sftp-host-key-path", os.Getenv("LITESTREAM_SFTP_HOST_KEY_PATH"), "") + sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") ) func TestReplicaClient_Generations(t *testing.T) { @@ -538,6 +539,7 @@ func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { c.User = *sftpUser c.Password = *sftpPassword c.KeyPath = *sftpKeyPath + c.HostKeyPath = *sftpHostKeyPath c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) return c } diff --git a/internal/internal.go b/internal/internal.go index 36598d4a..cb23a34f 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -3,6 +3,7 @@ package internal import ( "fmt" "io" + "math" "os" "regexp" "strconv" @@ -159,8 +160,11 @@ func ParseSnapshotPath(s string) (index int, err error) { return 0, fmt.Errorf("invalid snapshot path") } - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil + i32, _ := strconv.ParseUint(a[1], 16, 32) + if i32 > math.MaxInt32 { + return 0, fmt.Errorf("index too large in snapshot path %q", s) + } + return int(i32), nil } var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`) @@ -172,9 +176,15 @@ func ParseWALSegmentPath(s string) (index int, offset int64, err error) { return 0, 0, fmt.Errorf("invalid wal segment path") } - i64, _ := strconv.ParseUint(a[1], 16, 64) - off64, _ := strconv.ParseUint(a[2], 16, 64) - return int(i64), int64(off64), nil + i32, _ := strconv.ParseUint(a[1], 16, 32) + if i32 > math.MaxInt32 { + return 0, 0, fmt.Errorf("index too large in wal segment path %q", s) + } + off64, _ := strconv.ParseInt(a[2], 16, 64) + if off64 > math.MaxInt64 { + return 0, 0, fmt.Errorf("offset too large in wal segment path %q", s) + } + return int(i32), int64(off64), nil } var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\/([0-9a-f]{8})\.wal\.lz4$`) diff --git a/s3/replica_client.go b/s3/replica_client.go index a739a5f5..a9e3e638 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -715,10 +715,10 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) var ( localhostRegex = regexp.MustCompile(`^(?:(.+)\.)?localhost$`) - backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`) - filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.filebase.com$`) - digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`) - linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`) + backblazeRegex = 
regexp.MustCompile(`^(?:(.+)\.)?s3\.([^.]+)\.backblazeb2\.com$`) + filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.filebase\.com$`) + digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces\.com$`) + linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects\.com$`) ) func isNotExists(err error) bool { diff --git a/sftp/replica_client.go b/sftp/replica_client.go index 30d8fa87..8b651e97 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -41,6 +41,7 @@ type ReplicaClient struct { Password string Path string KeyPath string + HostKeyPath string DialTimeout time.Duration } @@ -71,14 +72,28 @@ func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) { // Build SSH configuration & auth methods config := &ssh.ClientConfig{ - User: c.User, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - BannerCallback: ssh.BannerDisplayStderr(), + User: c.User, + BannerCallback: ssh.BannerDisplayStderr(), } if c.Password != "" { config.Auth = append(config.Auth, ssh.Password(c.Password)) } + if c.HostKeyPath == "" { + config.HostKeyCallback = ssh.InsecureIgnoreHostKey() + } else { + buf, err := os.ReadFile(c.HostKeyPath) + if err != nil { + return nil, fmt.Errorf("cannot read sftp host key path: %w", err) + } + + key, _, _, _, err := ssh.ParseAuthorizedKey(buf) + if err != nil { + return nil, fmt.Errorf("cannot parse sftp host key path: path=%s len=%d err=%w", c.HostKeyPath, len(buf), err) + } + config.HostKeyCallback = ssh.FixedHostKey(key) + } + if c.KeyPath != "" { buf, err := os.ReadFile(c.KeyPath) if err != nil { From e84994ad951ca86294407aa6e21f5faed99da606 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 30 Jan 2022 20:15:46 -0700 Subject: [PATCH 38/95] Add golangci-lint to CI --- .github/workflows/golangci-lint.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 00000000..e26eb482 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,18 @@ +name: golangci-lint +on: + pull_request: + +permissions: + contents: read + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - uses: golangci/golangci-lint-action@v2 + with: + version: latest + args: --timeout=10m From 5d811f2e39734c6f0d92d817d232abc0255457c6 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 31 Jan 2022 08:54:02 -0700 Subject: [PATCH 39/95] Fix golangci-lint issues --- cmd/litestream/main.go | 1 + cmd/litestream/restore.go | 3 -- db.go | 73 +++-------------------------- internal/internal.go | 8 +++- internal/testingutil/testingutil.go | 26 ++++++++++ litestream.go | 8 ---- litestream_test.go | 25 ---------- replica.go | 12 ++--- wal_downloader_test.go | 3 +- 9 files changed, 47 insertions(+), 112 deletions(-) diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index db491c40..ac9d5a5a 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -96,6 +96,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { // Setup signal handler. ctx, cancel := context.WithCancel(ctx) + defer cancel() signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, notifySignals...) 
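Note (editorial, not part of this patch): the `defer cancel()` added above addresses the kind of finding linters such as golangci-lint report when a context.CancelFunc may not be called on every return path. A small illustrative sketch of the pattern, not taken from the Litestream codebase:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        // WithCancel allocates resources that are only released when cancel
        // is called; deferring it guarantees release on every return path.
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        select {
        case <-time.After(10 * time.Millisecond):
            fmt.Println("work finished")
        case <-ctx.Done():
            fmt.Println("canceled:", ctx.Err())
        }
    }
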
diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index d2a5d1d0..b746b1ea 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -2,7 +2,6 @@ package main import ( "context" - "errors" "flag" "fmt" "io" @@ -276,5 +275,3 @@ Examples: DefaultConfigPath(), ) } - -var errSkipDBExists = errors.New("database already exists, skipping") diff --git a/db.go b/db.go index 85d146dc..74f93911 100644 --- a/db.go +++ b/db.go @@ -12,13 +12,10 @@ import ( "io" "io/ioutil" "log" - "math" "math/rand" "os" "path/filepath" - "regexp" "sort" - "strconv" "strings" "sync" "time" @@ -429,7 +426,9 @@ func (db *DB) Close() (err error) { err = e } } - r.Stop(true) + if e := r.Stop(true); e != nil && err == nil { + err = e + } } // Release the read lock to allow other applications to handle checkpointing. @@ -1142,11 +1141,11 @@ func (db *DB) copyToShadowWAL(ctx context.Context) error { go func() { zw := lz4.NewWriter(pw) if _, err := io.Copy(zw, &io.LimitedReader{R: f, N: walByteN}); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) } else if err := zw.Close(); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) } - pw.Close() + _ = pw.Close() }() // Write a new, compressed segment via pipe. @@ -1336,47 +1335,12 @@ func (itr *shadowWALSegmentIterator) WALSegment() WALSegmentInfo { return itr.infos[0] } -// frameAlign returns a frame-aligned offset. -// Returns zero if offset is less than the WAL header size. -func frameAlign(offset int64, pageSize int) int64 { - assert(offset >= 0, "frameAlign(): offset must be non-negative") - assert(pageSize >= 0, "frameAlign(): page size must be non-negative") - - if offset < WALHeaderSize { - return 0 - } - - frameSize := WALFrameHeaderSize + int64(pageSize) - frameN := (offset - WALHeaderSize) / frameSize - return (frameN * frameSize) + WALHeaderSize -} - // SQLite WAL constants const ( WALHeaderChecksumOffset = 24 WALFrameHeaderChecksumOffset = 16 ) -func readLastChecksumFrom(f *os.File, pageSize int) (uint32, uint32, error) { - // Determine the byte offset of the checksum for the header (if no pages - // exist) or for the last page (if at least one page exists). - offset := int64(WALHeaderChecksumOffset) - if fi, err := f.Stat(); err != nil { - return 0, 0, err - } else if sz := frameAlign(fi.Size(), pageSize); fi.Size() > WALHeaderSize { - offset = sz - int64(pageSize) - WALFrameHeaderSize + WALFrameHeaderChecksumOffset - } - - // Read big endian checksum. - b := make([]byte, 8) - if n, err := f.ReadAt(b, offset); err != nil { - return 0, 0, err - } else if n != len(b) { - return 0, 0, io.ErrUnexpectedEOF - } - return binary.BigEndian.Uint32(b[0:]), binary.BigEndian.Uint32(b[4:]), nil -} - // Checkpoint performs a checkpoint on the WAL file. func (db *DB) Checkpoint(ctx context.Context, mode string) (err error) { db.mu.Lock() @@ -1584,31 +1548,6 @@ func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) { return h.Sum64(), pos, nil } -// parseWALPath returns the index for the WAL file. -// Returns an error if the path is not a valid WAL path. -func parseWALPath(s string) (index int, err error) { - s = filepath.Base(s) - - a := walPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, fmt.Errorf("invalid wal path: %s", s) - } - - i32, _ := strconv.ParseUint(a[1], 16, 32) - if i32 > math.MaxInt32 { - return 0, fmt.Errorf("index too large in wal path: %s", s) - } - return int(i32), nil -} - -// formatWALPath formats a WAL filename with a given index. 
-func formatWALPath(index int) string { - assert(index >= 0, "wal index must be non-negative") - return FormatIndex(index) + ".wal" -} - -var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) - // ReadWALFields iterates over the header & frames in the WAL data in r. // Returns salt, checksum, byte order & the last frame. WAL data must start // from the beginning of the WAL header and must end on either the WAL header diff --git a/internal/internal.go b/internal/internal.go index cb23a34f..077841c8 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -1,6 +1,7 @@ package internal import ( + "crypto/md5" "fmt" "io" "math" @@ -180,7 +181,7 @@ func ParseWALSegmentPath(s string) (index int, offset int64, err error) { if i32 > math.MaxInt32 { return 0, 0, fmt.Errorf("index too large in wal segment path %q", s) } - off64, _ := strconv.ParseInt(a[2], 16, 64) + off64, _ := strconv.ParseUint(a[2], 16, 64) if off64 > math.MaxInt64 { return 0, 0, fmt.Errorf("offset too large in wal segment path %q", s) } @@ -228,3 +229,8 @@ func TruncateDuration(d time.Duration) time.Duration { } return d } + +// MD5hash returns a hex-encoded MD5 hash of b. +func MD5hash(b []byte) string { + return fmt.Sprintf("%x", md5.Sum(b)) +} diff --git a/internal/testingutil/testingutil.go b/internal/testingutil/testingutil.go index 22636f27..bcf60dc3 100644 --- a/internal/testingutil/testingutil.go +++ b/internal/testingutil/testingutil.go @@ -1,9 +1,12 @@ package testingutil import ( + "bytes" "io" "os" "testing" + + "github.com/pierrec/lz4/v4" ) // ReadFile reads all data from filename. Fail on error. @@ -62,3 +65,26 @@ func Setenv(tb testing.TB, key, value string) func() { } } } + +func CompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(b); err != nil { + tb.Fatal(err) + } else if err := zw.Close(); err != nil { + tb.Fatal(err) + } + return buf.Bytes() +} + +func DecompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + buf, err := io.ReadAll(lz4.NewReader(bytes.NewReader(b))) + if err != nil { + tb.Fatal(err) + } + return buf +} diff --git a/litestream.go b/litestream.go index 5381e057..6cc3f16b 100644 --- a/litestream.go +++ b/litestream.go @@ -1,10 +1,8 @@ package litestream import ( - "crypto/md5" "database/sql" "encoding/binary" - "encoding/hex" "errors" "fmt" "io" @@ -475,12 +473,6 @@ func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') } -// md5hash returns a hex-encoded MD5 hash of b. -func md5hash(b []byte) string { - sum := md5.Sum(b) - return hex.EncodeToString(sum[:]) -} - // Tracef is used for low-level tracing. 
var Tracef = func(format string, a ...interface{}) {} diff --git a/litestream_test.go b/litestream_test.go index 9878fbdf..2be6ba27 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -4,13 +4,11 @@ import ( "bytes" "encoding/binary" "encoding/hex" - "io" "os" "testing" "github.com/benbjohnson/litestream" _ "github.com/mattn/go-sqlite3" - "github.com/pierrec/lz4/v4" ) func TestChecksum(t *testing.T) { @@ -80,26 +78,3 @@ func fileEqual(tb testing.TB, x, y string) bool { return bytes.Equal(bx, by) } - -func compressLZ4(tb testing.TB, b []byte) []byte { - tb.Helper() - - var buf bytes.Buffer - zw := lz4.NewWriter(&buf) - if _, err := zw.Write(b); err != nil { - tb.Fatal(err) - } else if err := zw.Close(); err != nil { - tb.Fatal(err) - } - return buf.Bytes() -} - -func decompressLZ4(tb testing.TB, b []byte) []byte { - tb.Helper() - - buf, err := io.ReadAll(lz4.NewReader(bytes.NewReader(b))) - if err != nil { - tb.Fatal(err) - } - return buf -} diff --git a/replica.go b/replica.go index 8547e61e..95b30e5a 100644 --- a/replica.go +++ b/replica.go @@ -105,14 +105,14 @@ func (r *Replica) DB() *DB { return r.db } func (r *Replica) Client() ReplicaClient { return r.client } // Starts replicating in a background goroutine. -func (r *Replica) Start(ctx context.Context) error { +func (r *Replica) Start(ctx context.Context) { // Ignore if replica is being used sychronously. if !r.MonitorEnabled { - return nil + return } // Stop previous replication. - r.Stop(false) + _ = r.Stop(false) // Wrap context with cancelation. ctx, r.cancel = context.WithCancel(ctx) @@ -123,8 +123,6 @@ func (r *Replica) Start(ctx context.Context) error { go func() { defer r.wg.Done(); r.retainer(ctx) }() go func() { defer r.wg.Done(); r.snapshotter(ctx) }() go func() { defer r.wg.Done(); r.validator(ctx) }() - - return nil } // Stop cancels any outstanding replication and blocks until finished. 
@@ -512,10 +510,10 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { defer zr.Close() if _, err := io.Copy(zr, r.f); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return err } else if err := zr.Close(); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return err } return pw.Close() diff --git a/wal_downloader_test.go b/wal_downloader_test.go index 65d1c8ba..fd1817f9 100644 --- a/wal_downloader_test.go +++ b/wal_downloader_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/internal/testingutil" "github.com/benbjohnson/litestream/mock" ) @@ -193,7 +194,7 @@ func testWALDownloader(t *testing.T, parallelism int) { filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", fmt.Sprintf("%08x", i), "00000000.wal.lz4") if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil { t.Fatal(err) - } else if err := os.WriteFile(filename, compressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil { + } else if err := os.WriteFile(filename, testingutil.CompressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil { t.Fatal(err) } } From a2cf2e260b5addad3ddf57bde61e54d783f165a5 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 31 Jan 2022 12:41:07 -0700 Subject: [PATCH 40/95] Skip some CI jobs for dependabot --- .github/workflows/integration_test.yml | 4 ++++ .github/workflows/release.linux.yml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index afeccca4..cd7c13b1 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -5,6 +5,7 @@ jobs: s3-integration-test: name: Run S3 Integration Tests runs-on: ubuntu-18.04 + if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -30,6 +31,7 @@ jobs: gcp-integration-test: name: Run GCP Integration Tests runs-on: ubuntu-18.04 + if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -57,6 +59,7 @@ jobs: abs-integration-test: name: Run Azure Blob Store Integration Tests runs-on: ubuntu-18.04 + if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -79,6 +82,7 @@ jobs: sftp-integration-test: name: Run SFTP Integration Tests runs-on: ubuntu-18.04 + if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index 5133bfaf..5e8acd70 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -126,7 +126,7 @@ jobs: asset_content_type: application/octet-stream - name: Dispatch test runner - if: matrix.deploy_test_runner + if: matrix.deploy_test_runner && github.actor != 'dependabot' run: sleep 60 && gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.run_id }} -f litestream_version=${{ github.sha }} env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} From ee77592d7e8b10168ab9210e681e00a959c8bbeb Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 31 Jan 2022 13:01:24 -0700 Subject: [PATCH 41/95] Skip dependabot CI using branches --- .github/workflows/integration_test.yml | 9 ++++----- .github/workflows/release.linux.yml | 2 ++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index cd7c13b1..e1b21192 100644 --- a/.github/workflows/integration_test.yml +++ 
b/.github/workflows/integration_test.yml @@ -1,11 +1,13 @@ name: Integration Tests -on: pull_request +on: + pull_request: + branches-ignore: + - "dependabot/**" jobs: s3-integration-test: name: Run S3 Integration Tests runs-on: ubuntu-18.04 - if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -31,7 +33,6 @@ jobs: gcp-integration-test: name: Run GCP Integration Tests runs-on: ubuntu-18.04 - if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -59,7 +60,6 @@ jobs: abs-integration-test: name: Run Azure Blob Store Integration Tests runs-on: ubuntu-18.04 - if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 @@ -82,7 +82,6 @@ jobs: sftp-integration-test: name: Run SFTP Integration Tests runs-on: ubuntu-18.04 - if: github.actor != 'dependabot' steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index 5e8acd70..aff5e841 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -7,6 +7,8 @@ on: - opened - synchronize - reopened + branches-ignore: + - "dependabot/**" name: Release (Linux) jobs: From fb3a3d904ff651e9e284d73107250618730836e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:02:15 +0000 Subject: [PATCH 42/95] Bump github.com/aws/aws-sdk-go from 1.42.40 to 1.42.44 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.40 to 1.42.44. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.40...v1.42.44) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 169ea193..3f0d18cb 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/iam v0.1.1 // indirect cloud.google.com/go/storage v1.18.2 github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/aws/aws-sdk-go v1.42.40 + github.com/aws/aws-sdk-go v1.42.44 github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.11 github.com/pierrec/lz4/v4 v4.1.12 diff --git a/go.sum b/go.sum index 7c575947..ce57ecdf 100644 --- a/go.sum +++ b/go.sum @@ -77,8 +77,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go v1.42.40 h1:oZ+hyhorrkYdT23YO8s0eWBp9Fg8k4HsAFL3n0V25WA= -github.com/aws/aws-sdk-go v1.42.40/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/aws/aws-sdk-go v1.42.44 h1:vPlF4cUsdN5ETfvb7ewZFbFZyB6Rsfndt3kS2XqLXKo= +github.com/aws/aws-sdk-go v1.42.44/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -234,8 +234,6 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= -github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ= github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= From d5c15593bb5402c9c0d93772b6a525bdea7ee668 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:17:19 +0000 Subject: [PATCH 43/95] Bump google.golang.org/api from 0.65.0 to 0.66.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.65.0 to 0.66.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.65.0...v0.66.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 3f0d18cb..a22ccaf8 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,6 @@ require ( golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 - google.golang.org/api v0.65.0 + google.golang.org/api v0.66.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index ce57ecdf..fc901358 100644 --- a/go.sum +++ b/go.sum @@ -589,8 +589,8 @@ google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3l google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= -google.golang.org/api v0.65.0 h1:MTW9c+LIBAbwoS1Gb+YV7NjFBt2f7GtAS5hIzh2NjgQ= -google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= +google.golang.org/api v0.66.0 h1:CbGy4LEiXCVCiNEDFgGpWOVwsDT7E2Qej1ZvN1P7KPg= +google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -662,9 +662,9 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220111164026-67b88f271998 h1:g/x+MYjJYDEP3OBCYYmwIbt4x6k3gryb+ohyOR7PXfI= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0 h1:aCsSLXylHWFno0r4S3joLpiaWayvqd2Mn4iSvx4WZZc= +google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= From 5f38134032335119779f51f625fcc6768b5b8799 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:51:15 +0000 Subject: [PATCH 44/95] Bump cloud.google.com/go/storage from 1.18.2 to 1.19.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.18.2 to 1.19.0. 
- [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...spanner/v1.19.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 3 +-- go.sum | 17 ++++++++--------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index a22ccaf8..140cfe3b 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,7 @@ module github.com/benbjohnson/litestream go 1.16 require ( - cloud.google.com/go/iam v0.1.1 // indirect - cloud.google.com/go/storage v1.18.2 + cloud.google.com/go/storage v1.19.0 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.44 github.com/mattn/go-shellwords v1.0.12 diff --git a/go.sum b/go.sum index fc901358..f2ab5966 100644 --- a/go.sum +++ b/go.sum @@ -50,8 +50,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY= -cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= +cloud.google.com/go/storage v1.19.0 h1:XOQSnPJD8hRtZJ3VdCyK0mBZsGGImrzPAMbSWcHSe6Q= +cloud.google.com/go/storage v1.19.0/go.mod h1:6rgiTRjOqI/Zd9YKimub5TIB4d+p3LH33V3ZE1DMuUM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= @@ -168,8 +168,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -408,7 +409,6 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -480,7 +480,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -585,10 +584,10 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= +google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= google.golang.org/api v0.66.0 h1:CbGy4LEiXCVCiNEDFgGpWOVwsDT7E2Qej1ZvN1P7KPg= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -654,17 +653,17 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0 h1:aCsSLXylHWFno0r4S3joLpiaWayvqd2Mn4iSvx4WZZc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q= +google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= From 89560c8632a46c4f987ee052952efd125e3c24c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:56:37 +0000 Subject: [PATCH 45/95] Bump github.com/prometheus/client_golang from 1.12.0 to 1.12.1 Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.0 to 1.12.1. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.12.0...v1.12.1) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 140cfe3b..d0b69b08 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.11 github.com/pierrec/lz4/v4 v4.1.12 github.com/pkg/sftp v1.13.4 - github.com/prometheus/client_golang v1.12.0 + github.com/prometheus/client_golang v1.12.1 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 diff --git a/go.sum b/go.sum index f2ab5966..f7218f02 100644 --- a/go.sum +++ b/go.sum @@ -260,8 +260,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= -github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 4349398ff56384651fe08feab857e99c71af8607 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 31 Jan 2022 15:56:42 -0700 Subject: [PATCH 46/95] Remove shadow WAL iterator This commit removes the shadow WAL iterator and replaces it with a fileWalSegmentIterator instead. This works since the shadow WAL now has the same structure as the replica WAL. This reduces duplicate code and will make it so read replication can be daisy chained in the future. --- db.go | 98 +---------------------------------------------------------- 1 file changed, 1 insertion(+), 97 deletions(-) diff --git a/db.go b/db.go index 74f93911..f73cf0ee 100644 --- a/db.go +++ b/db.go @@ -1236,103 +1236,7 @@ func (db *DB) WALSegments(ctx context.Context, generation string) (WALSegmentIte sort.Ints(indexes) - return newShadowWALSegmentIterator(db, generation, indexes), nil -} - -type shadowWALSegmentIterator struct { - db *DB - generation string - indexes []int - - infos []WALSegmentInfo - err error -} - -func newShadowWALSegmentIterator(db *DB, generation string, indexes []int) *shadowWALSegmentIterator { - return &shadowWALSegmentIterator{ - db: db, - generation: generation, - indexes: indexes, - } -} - -func (itr *shadowWALSegmentIterator) Close() (err error) { - return itr.err -} - -func (itr *shadowWALSegmentIterator) Next() bool { - // Exit if an error has already occurred. - if itr.err != nil { - return false - } - - for { - // Move to the next segment in cache, if available. - if len(itr.infos) > 1 { - itr.infos = itr.infos[1:] - return true - } - itr.infos = itr.infos[:0] // otherwise clear infos - - // If no indexes remain, stop iteration. 
- if len(itr.indexes) == 0 { - return false - } - - // Read segments into a cache for the current index. - index := itr.indexes[0] - itr.indexes = itr.indexes[1:] - f, err := os.Open(filepath.Join(itr.db.ShadowWALDir(itr.generation), FormatIndex(index))) - if err != nil { - itr.err = err - return false - } - defer func() { _ = f.Close() }() - - fis, err := f.Readdir(-1) - if err != nil { - itr.err = err - return false - } else if err := f.Close(); err != nil { - itr.err = err - return false - } - for _, fi := range fis { - filename := filepath.Base(fi.Name()) - if fi.IsDir() { - continue - } - - offset, err := ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) - if err != nil { - continue - } - - itr.infos = append(itr.infos, WALSegmentInfo{ - Generation: itr.generation, - Index: index, - Offset: offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) - } - - // Ensure segments are sorted within index. - sort.Sort(WALSegmentInfoSlice(itr.infos)) - - if len(itr.infos) > 0 { - return true - } - } -} - -func (itr *shadowWALSegmentIterator) Err() error { return itr.err } - -func (itr *shadowWALSegmentIterator) WALSegment() WALSegmentInfo { - if len(itr.infos) == 0 { - return WALSegmentInfo{} - } - return itr.infos[0] + return newFileWALSegmentIterator(db.ShadowWALDir(generation), generation, indexes), nil } // SQLite WAL constants From 8009bcf6547be4820804c41a7d1a236f9240e23d Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Feb 2022 08:11:21 -0700 Subject: [PATCH 47/95] Remove Windows support Unfortunately, I don't have the expertise or bandwidth to maintain the Windows support in Litestream. I'm open to re-adding support in the future but right now it is hindering development and is not well-tested or well-used. --- cmd/litestream/main.go | 12 +--- cmd/litestream/main_notwindows.go | 21 ------ cmd/litestream/main_windows.go | 108 ------------------------------ go.mod | 1 - internal/internal.go | 11 ++- internal/internal_unix.go | 21 ------ internal/internal_windows.go | 22 ------ 7 files changed, 13 insertions(+), 183 deletions(-) delete mode 100644 cmd/litestream/main_notwindows.go delete mode 100644 cmd/litestream/main_windows.go delete mode 100644 internal/internal_unix.go delete mode 100644 internal/internal_windows.go diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index ac9d5a5a..3373eca4 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -17,6 +17,7 @@ import ( "regexp" "strconv" "strings" + "syscall" "time" "github.com/benbjohnson/litestream" @@ -67,13 +68,6 @@ func NewMain(stdin io.Reader, stdout, stderr io.Writer) *Main { // Run executes the program. func (m *Main) Run(ctx context.Context, args []string) (err error) { - // Execute replication command if running as a Windows service. - if isService, err := isWindowsService(); err != nil { - return err - } else if isService { - return runWindowsService(ctx) - } - // Copy "LITESTEAM" environment credentials. applyLitestreamEnv() @@ -98,7 +92,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, notifySignals...) 
+ signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) if err := c.Run(ctx); err != nil { return err @@ -685,7 +679,7 @@ func DefaultConfigPath() string { if v := os.Getenv("LITESTREAM_CONFIG"); v != "" { return v } - return defaultConfigPath + return "/etc/litestream.yml" } func registerConfigFlag(fs *flag.FlagSet, configPath *string, noExpandEnv *bool) { diff --git a/cmd/litestream/main_notwindows.go b/cmd/litestream/main_notwindows.go deleted file mode 100644 index 6d4dcef9..00000000 --- a/cmd/litestream/main_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package main - -import ( - "context" - "os" - "syscall" -) - -const defaultConfigPath = "/etc/litestream.yml" - -func isWindowsService() (bool, error) { - return false, nil -} - -func runWindowsService(ctx context.Context) error { - panic("cannot run windows service as unix process") -} - -var notifySignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} diff --git a/cmd/litestream/main_windows.go b/cmd/litestream/main_windows.go deleted file mode 100644 index e6276eb4..00000000 --- a/cmd/litestream/main_windows.go +++ /dev/null @@ -1,108 +0,0 @@ -//go:build windows -// +build windows - -package main - -import ( - "context" - "io" - "log" - "os" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/eventlog" -) - -const defaultConfigPath = `C:\Litestream\litestream.yml` - -// serviceName is the Windows Service name. -const serviceName = "Litestream" - -// isWindowsService returns true if currently executing within a Windows service. -func isWindowsService() (bool, error) { - return svc.IsWindowsService() -} - -func runWindowsService(ctx context.Context) error { - // Attempt to install new log service. This will fail if already installed. - // We don't log the error because we don't have anywhere to log until we open the log. - _ = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info) - - elog, err := eventlog.Open(serviceName) - if err != nil { - return err - } - defer elog.Close() - - // Set eventlog as log writer while running. - log.SetOutput((*eventlogWriter)(elog)) - defer log.SetOutput(os.Stdout) - - log.Print("Litestream service starting") - - if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil { - return errExit - } - - log.Print("Litestream service stopped") - return nil -} - -// windowsService is an interface adapter for svc.Handler. -type windowsService struct { - ctx context.Context -} - -func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusCh chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { - var err error - - // Notify Windows that the service is starting up. - statusCh <- svc.Status{State: svc.StartPending} - - // Instantiate replication command and load configuration. - c := NewReplicateCommand() - if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil { - log.Printf("cannot load configuration: %s", err) - return true, 1 - } - - // Execute replication command. - if err := c.Run(s.ctx); err != nil { - log.Printf("cannot replicate: %s", err) - statusCh <- svc.Status{State: svc.StopPending} - return true, 2 - } - - // Notify Windows that the service is now running. 
- statusCh <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop} - - for { - select { - case req := <-r: - switch req.Cmd { - case svc.Stop: - c.Close() - statusCh <- svc.Status{State: svc.StopPending} - return false, windows.NO_ERROR - case svc.Interrogate: - statusCh <- req.CurrentStatus - default: - log.Printf("Litestream service received unexpected change request cmd: %d", req.Cmd) - } - } - } -} - -// Ensure implementation implements io.Writer interface. -var _ io.Writer = (*eventlogWriter)(nil) - -// eventlogWriter is an adapter for using eventlog.Log as an io.Writer. -type eventlogWriter eventlog.Log - -func (w *eventlogWriter) Write(p []byte) (n int, err error) { - elog := (*eventlog.Log)(w) - return 0, elog.Info(1, string(p)) -} - -var notifySignals = []os.Signal{os.Interrupt} diff --git a/go.mod b/go.mod index d0b69b08..13575b62 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/prometheus/client_golang v1.12.1 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 google.golang.org/api v0.66.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/internal/internal.go b/internal/internal.go index 077841c8..b2db3b98 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -132,7 +132,7 @@ func MkdirAll(path string, mode os.FileMode, uid, gid int) error { if j > 1 { // Create parent. - err = MkdirAll(fixRootDirectory(path[:j-1]), mode, uid, gid) + err = MkdirAll(path[:j-1], mode, uid, gid) if err != nil { return err } @@ -154,6 +154,15 @@ func MkdirAll(path string, mode os.FileMode, uid, gid int) error { return nil } +// Fileinfo returns syscall fields from a FileInfo object. +func Fileinfo(fi os.FileInfo) (uid, gid int) { + if fi == nil { + return -1, -1 + } + stat := fi.Sys().(*syscall.Stat_t) + return int(stat.Uid), int(stat.Gid) +} + // ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. func ParseSnapshotPath(s string) (index int, err error) { a := snapshotPathRegex.FindStringSubmatch(s) diff --git a/internal/internal_unix.go b/internal/internal_unix.go deleted file mode 100644 index cedc947e..00000000 --- a/internal/internal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -package internal - -import ( - "os" - "syscall" -) - -// Fileinfo returns syscall fields from a FileInfo object. -func Fileinfo(fi os.FileInfo) (uid, gid int) { - if fi == nil { - return -1, -1 - } - stat := fi.Sys().(*syscall.Stat_t) - return int(stat.Uid), int(stat.Gid) -} - -func fixRootDirectory(p string) string { - return p -} diff --git a/internal/internal_windows.go b/internal/internal_windows.go deleted file mode 100644 index 18531642..00000000 --- a/internal/internal_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build windows - -package internal - -import ( - "os" -) - -// Fileinfo returns syscall fields from a FileInfo object. -func Fileinfo(fi os.FileInfo) (uid, gid int) { - return -1, -1 -} - -// fixRootDirectory is copied from the standard library for use with mkdirAll() -func fixRootDirectory(p string) string { - if len(p) == len(`\\?\c:`) { - if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' 
&& os.IsPathSeparator(p[3]) && p[5] == ':' { - return p + `\` - } - } - return p -} From 762c7ae5313bb6b72a1e32629229760bdf828087 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 6 Feb 2022 09:27:26 -0700 Subject: [PATCH 48/95] Implement FileWatcher --- cmd/litestream/main.go | 8 +- cmd/litestream/replicate.go | 28 +-- db.go | 71 ++++--- db_test.go | 1 - go.mod | 1 + go.sum | 2 + integration/cmd_test.go | 6 +- internal/file_watcher.go | 36 ++++ internal/file_watcher_bsd.go | 259 +++++++++++++++++++++++ internal/file_watcher_linux.go | 369 +++++++++++++++++++++++++++++++++ internal/file_watcher_test.go | 211 +++++++++++++++++++ internal/internal.go | 4 +- replica.go | 2 +- server.go | 186 +++++++++++++++++ server_test.go | 1 + 15 files changed, 1132 insertions(+), 53 deletions(-) create mode 100644 internal/file_watcher.go create mode 100644 internal/file_watcher_bsd.go create mode 100644 internal/file_watcher_linux.go create mode 100644 internal/file_watcher_test.go create mode 100644 server.go create mode 100644 server_test.go diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 3373eca4..fa43f4dc 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -267,7 +267,6 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { // DBConfig represents the configuration for a single database. type DBConfig struct { Path string `yaml:"path"` - MonitorInterval *time.Duration `yaml:"monitor-interval"` CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` @@ -281,14 +280,15 @@ func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) { if err != nil { return nil, err } + return NewDBFromConfigWithPath(dbc, path) +} +// NewDBFromConfigWithPath instantiates a DB based on a configuration and using a given path. +func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) { // Initialize database with given path. db := litestream.NewDB(path) // Override default database settings if specified in configuration. - if dbc.MonitorInterval != nil { - db.MonitorInterval = *dbc.MonitorInterval - } if dbc.CheckpointInterval != nil { db.CheckpointInterval = *dbc.CheckpointInterval } diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index fa849073..e0ae7bd1 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -35,8 +35,7 @@ type ReplicateCommand struct { Config Config - // List of managed databases specified in the config. - DBs []*litestream.DB + server *litestream.Server } // NewReplicateCommand returns a new instance of ReplicateCommand. @@ -104,21 +103,27 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { log.Println("no databases specified in configuration") } + c.server = litestream.NewServer() + if err := c.server.Open(); err != nil { + return fmt.Errorf("open server: %w", err) + } + + // Add databases to the server. for _, dbConfig := range c.Config.DBs { - db, err := NewDBFromConfig(dbConfig) + path, err := expand(dbConfig.Path) if err != nil { return err } - // Open database & attach to program. - if err := db.Open(); err != nil { + if err := c.server.Watch(path, func(path string) (*litestream.DB, error) { + return NewDBFromConfigWithPath(dbConfig, path) + }); err != nil { return err } - c.DBs = append(c.DBs, db) } // Notify user that initialization is done. 
- for _, db := range c.DBs { + for _, db := range c.server.DBs() { log.Printf("initialized db: %s", db.Path()) for _, r := range db.Replicas { switch client := r.Client().(type) { @@ -180,13 +185,8 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { // Close closes all open databases. func (c *ReplicateCommand) Close() (err error) { - for _, db := range c.DBs { - if e := db.Close(); e != nil { - log.Printf("error closing db: path=%s err=%s", db.Path(), e) - if err == nil { - err = e - } - } + if e := c.server.Close(); e != nil && err == nil { + err = e } return err } diff --git a/db.go b/db.go index f73cf0ee..84acd616 100644 --- a/db.go +++ b/db.go @@ -28,12 +28,15 @@ import ( // Default DB settings. const ( - DefaultMonitorInterval = 1 * time.Second DefaultCheckpointInterval = 1 * time.Minute DefaultMinCheckpointPageN = 1000 DefaultMaxCheckpointPageN = 10000 ) +// MonitorDelayInterval is the time Litestream will wait after receiving a file +// change notification before processing the WAL file for changes. +const MonitorDelayInterval = 100 * time.Millisecond + // MaxIndex is the maximum possible WAL index. // If this index is reached then a new generation will be started. const MaxIndex = 0x7FFFFFFF @@ -43,14 +46,15 @@ const BusyTimeout = 1 * time.Second // DB represents a managed instance of a SQLite database in the file system. type DB struct { - mu sync.RWMutex - path string // part to database - db *sql.DB // target database - f *os.File // long-running db file descriptor - rtx *sql.Tx // long running read transaction - pos Pos // cached position - pageSize int // page size, in bytes - notify chan struct{} // closes on WAL change + mu sync.RWMutex + path string // part to database + db *sql.DB // target database + f *os.File // long-running db file descriptor + rtx *sql.Tx // long running read transaction + pos Pos // cached position + pageSize int // page size, in bytes + notifyCh chan struct{} // notifies DB of changes + walNotify chan struct{} // closes on WAL change // Cached salt & checksum from current shadow header. hdr []byte @@ -98,9 +102,6 @@ type DB struct { // better precision. CheckpointInterval time.Duration - // Frequency at which to perform db sync. - MonitorInterval time.Duration - // List of replicas for the database. // Must be set before calling Open(). Replicas []*Replica @@ -111,13 +112,13 @@ type DB struct { // NewDB returns a new instance of DB for a given path. func NewDB(path string) *DB { db := &DB{ - path: path, - notify: make(chan struct{}), + path: path, + notifyCh: make(chan struct{}, 1), + walNotify: make(chan struct{}), MinCheckpointPageN: DefaultMinCheckpointPageN, MaxCheckpointPageN: DefaultMaxCheckpointPageN, CheckpointInterval: DefaultCheckpointInterval, - MonitorInterval: DefaultMonitorInterval, Logger: log.New(LogWriter, fmt.Sprintf("%s: ", logPrefixPath(path)), LogFlags), } @@ -358,11 +359,16 @@ func (db *DB) walSegmentOffsetsByIndex(generation string, index int) ([]int64, e return offsets, nil } -// Notify returns a channel that closes when the shadow WAL changes. -func (db *DB) Notify() <-chan struct{} { +// NotifyCh returns a channel that can be used to signal changes in the DB. +func (db *DB) NotifyCh() chan<- struct{} { + return db.notifyCh +} + +// WALNotify returns a channel that closes when the shadow WAL changes. +func (db *DB) WALNotify() <-chan struct{} { db.mu.RLock() defer db.mu.RUnlock() - return db.notify + return db.walNotify } // PageSize returns the page size of the underlying database. 
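The DB now exposes NotifyCh, a capacity-1 buffered channel, so an external watcher can signal WAL changes instead of relying on the old interval-based polling. A minimal sketch of the producer side of that contract (the helper name is hypothetical; the real dispatch is done by Server.dispatchFileEvent later in this patch):

// signalChange tells a DB that its file or WAL may have changed. The send is
// non-blocking: with a capacity-1 channel, a second signal arriving while one
// is already pending is simply coalesced.
func signalChange(db *litestream.DB) {
	select {
	case db.NotifyCh() <- struct{}{}:
	default:
	}
}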
@@ -395,10 +401,8 @@ func (db *DB) Open() (err error) { } // Start monitoring SQLite database in a separate goroutine. - if db.MonitorInterval > 0 { - db.wg.Add(1) - go func() { defer db.wg.Done(); db.monitor() }() - } + db.wg.Add(1) + go func() { defer db.wg.Done(); db.monitor() }() return nil } @@ -903,8 +907,8 @@ func (db *DB) Sync(ctx context.Context) (err error) { // Notify replicas of WAL changes. if db.pos != origPos { - close(db.notify) - db.notify = make(chan struct{}) + close(db.walNotify) + db.walNotify = make(chan struct{}) } return nil @@ -1367,18 +1371,27 @@ func (db *DB) execCheckpoint(mode string) (err error) { // monitor runs in a separate goroutine and monitors the database & WAL. func (db *DB) monitor() { - ticker := time.NewTicker(db.MonitorInterval) - defer ticker.Stop() + timer := time.NewTimer(MonitorDelayInterval) + defer timer.Stop() for { - // Wait for ticker or context close. + // Wait for a file change notification from the file system. select { case <-db.ctx.Done(): return - case <-ticker.C: + case <-db.notifyCh: + } + + // Wait for small delay before processing changes. + timer.Reset(MonitorDelayInterval) + <-timer.C + + // Clear any additional change notifications that occurred during delay. + select { + case <-db.notifyCh: + default: } - // Sync the database to the shadow WAL. if err := db.Sync(db.ctx); err != nil && !errors.Is(err, context.Canceled) { db.Logger.Printf("sync error: %s", err) } diff --git a/db_test.go b/db_test.go index a9dbb585..f424fc44 100644 --- a/db_test.go +++ b/db_test.go @@ -560,7 +560,6 @@ func MustOpenDB(tb testing.TB) *litestream.DB { func MustOpenDBAt(tb testing.TB, path string) *litestream.DB { tb.Helper() db := litestream.NewDB(path) - db.MonitorInterval = 0 // disable background goroutine if err := db.Open(); err != nil { tb.Fatal(err) } diff --git a/go.mod b/go.mod index 13575b62..a01cc98f 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/prometheus/client_golang v1.12.1 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a // indirect google.golang.org/api v0.66.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index f7218f02..cfbb6548 100644 --- a/go.sum +++ b/go.sum @@ -485,6 +485,8 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a h1:ppl5mZgokTT8uPkmYOyEUmPTr3ypaKkg5eFOGrAmxxE= +golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/integration/cmd_test.go b/integration/cmd_test.go index 92d143b4..a663f9c2 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -43,6 +43,8 @@ func TestCmd_Replicate_OK(t *testing.T) { db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) if err != nil { t.Fatal(err) + 
} else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { t.Fatal(err) } @@ -378,9 +380,9 @@ func waitForLogMessage(tb testing.TB, b *internal.LockingBuffer, msg string) { // killLitestreamCmd interrupts the process and waits for a clean shutdown. func killLitestreamCmd(tb testing.TB, cmd *exec.Cmd, stdout *internal.LockingBuffer) { if err := cmd.Process.Signal(os.Interrupt); err != nil { - tb.Fatal(err) + tb.Fatal("kill litestream: signal:", err) } else if err := cmd.Wait(); err != nil { - tb.Fatal(err) + tb.Fatal("kill litestream: cmd:", err) } } diff --git a/internal/file_watcher.go b/internal/file_watcher.go new file mode 100644 index 00000000..501703d4 --- /dev/null +++ b/internal/file_watcher.go @@ -0,0 +1,36 @@ +package internal + +import ( + "errors" +) + +// File event mask constants. +const ( + FileEventCreated = 1 << iota + FileEventModified + FileEventDeleted +) + +// FileEvent represents an event on a watched file. +type FileEvent struct { + Name string + Mask int +} + +// ErrFileEventQueueOverflow is returned when the file event queue has overflowed. +var ErrFileEventQueueOverflow = errors.New("file event queue overflow") + +// FileWatcher represents a watcher of file events. +type FileWatcher interface { + Open() error + Close() error + + // Returns a channel of events for watched files. + Events() <-chan FileEvent + + // Adds a specific file to be watched. + Watch(filename string) error + + // Removes a specific file from being watched. + Unwatch(filename string) error +} diff --git a/internal/file_watcher_bsd.go b/internal/file_watcher_bsd.go new file mode 100644 index 00000000..c4852e0a --- /dev/null +++ b/internal/file_watcher_bsd.go @@ -0,0 +1,259 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "context" + "log" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/sys/unix" +) + +var _ FileWatcher = (*KqueueFileWatcher)(nil) + +// KqueueFileWatcher watches files and is notified of events on them. +// +// Watcher code based on https://github.com/fsnotify/fsnotify +type KqueueFileWatcher struct { + fd int + events chan FileEvent + + mu sync.Mutex + watches map[string]int + paths map[int]string + notExists map[string]struct{} + + g errgroup.Group + ctx context.Context + cancel func() +} + +// NewKqueueFileWatcher returns a new instance of KqueueFileWatcher. +func NewKqueueFileWatcher() *KqueueFileWatcher { + return &KqueueFileWatcher{ + events: make(chan FileEvent), + + watches: make(map[string]int), + paths: make(map[int]string), + notExists: make(map[string]struct{}), + } +} + +// NewFileWatcher returns an instance of KqueueFileWatcher on BSD systems. +func NewFileWatcher() FileWatcher { + return NewKqueueFileWatcher() +} + +// Events returns a read-only channel of file events. +func (w *KqueueFileWatcher) Events() <-chan FileEvent { + return w.events +} + +// Open initializes the watcher and begins listening for file events. 
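+// It creates the kqueue descriptor and starts two background goroutines: one that
+// reads kevents from the kernel and one that polls once per second for watched
+// paths that did not exist when Watch was called.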
+func (w *KqueueFileWatcher) Open() (err error) { + if w.fd, err = unix.Kqueue(); err != nil { + return err + } + + w.ctx, w.cancel = context.WithCancel(context.Background()) + w.g.Go(func() error { + if err := w.monitor(w.ctx); err != nil && w.ctx.Err() == nil { + return err + } + return nil + }) + w.g.Go(func() error { + if err := w.monitorNotExists(w.ctx); err != nil && w.ctx.Err() == nil { + return err + } + return nil + }) + + return nil +} + +// Close stops watching for file events and cleans up resources. +func (w *KqueueFileWatcher) Close() (err error) { + w.cancel() + + if w.fd != 0 { + if e := unix.Close(w.fd); e != nil && err == nil { + err = e + } + } + + if e := w.g.Wait(); e != nil && err == nil { + err = e + } + return err +} + +// Watch begins watching the given file or directory. +func (w *KqueueFileWatcher) Watch(filename string) error { + w.mu.Lock() + defer w.mu.Unlock() + + filename = filepath.Clean(filename) + + // If file doesn't exist, monitor separately until it does exist as we + // can't watch non-existent files with kqueue. + if _, err := os.Stat(filename); os.IsNotExist(err) { + w.notExists[filename] = struct{}{} + return nil + } + + return w.addWatch(filename) +} + +func (w *KqueueFileWatcher) addWatch(filename string) error { + wd, err := unix.Open(filename, unix.O_NONBLOCK|unix.O_RDONLY|unix.O_CLOEXEC, 0700) + if err != nil { + return err + } + + // TODO: Handle return count different than 1. + kevent := unix.Kevent_t{Fflags: unix.NOTE_DELETE | unix.NOTE_WRITE} + unix.SetKevent(&kevent, wd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE) + if _, err := unix.Kevent(w.fd, []unix.Kevent_t{kevent}, nil, nil); err != nil { + return err + } + + w.watches[filename] = wd + w.paths[wd] = filename + + delete(w.notExists, filename) + + return err +} + +// Unwatch stops watching the given file or directory. +func (w *KqueueFileWatcher) Unwatch(filename string) error { + w.mu.Lock() + defer w.mu.Unlock() + + filename = filepath.Clean(filename) + + // Look up watch ID by filename. + wd, ok := w.watches[filename] + if !ok { + return nil + } + + // TODO: Handle return count different than 1. + var kevent unix.Kevent_t + unix.SetKevent(&kevent, wd, unix.EVFILT_VNODE, unix.EV_DELETE) + if _, err := unix.Kevent(w.fd, []unix.Kevent_t{kevent}, nil, nil); err != nil { + return err + } + unix.Close(wd) + + delete(w.paths, wd) + delete(w.watches, filename) + delete(w.notExists, filename) + + return nil +} + +// monitorNotExist runs in a separate goroutine and monitors for the creation of +// watched files that do not yet exist. +func (w *KqueueFileWatcher) monitorNotExists(ctx context.Context) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + w.checkNotExists(ctx) + } + } +} + +func (w *KqueueFileWatcher) checkNotExists(ctx context.Context) { + w.mu.Lock() + defer w.mu.Unlock() + + for filename := range w.notExists { + if _, err := os.Stat(filename); os.IsNotExist(err) { + continue + } + + if err := w.addWatch(filename); err != nil { + log.Printf("non-existent file monitor: cannot add watch: %s", err) + continue + } + + // Send event to channel. + select { + case w.events <- FileEvent{ + Name: filename, + Mask: FileEventCreated, + }: + default: + } + } +} + +// monitor runs in a separate goroutine and monitors the inotify event queue. 
+func (w *KqueueFileWatcher) monitor(ctx context.Context) error { + kevents := make([]unix.Kevent_t, 10) + timeout := unix.NsecToTimespec(int64(100 * time.Millisecond)) + + for { + n, err := unix.Kevent(w.fd, nil, kevents, &timeout) + if err != nil && err != unix.EINTR { + return err + } else if n < 0 { + continue + } + + for _, kevent := range kevents[:n] { + if err := w.recv(ctx, &kevent); err != nil { + return err + } + } + } +} + +// recv processes a single event from kqeueue. +func (w *KqueueFileWatcher) recv(ctx context.Context, kevent *unix.Kevent_t) error { + if err := ctx.Err(); err != nil { + return err + } + + // Look up filename & remove from watcher if this is a delete. + w.mu.Lock() + filename, ok := w.paths[int(kevent.Ident)] + if ok && kevent.Fflags&unix.NOTE_DELETE != 0 { + delete(w.paths, int(kevent.Ident)) + delete(w.watches, filename) + unix.Close(int(kevent.Ident)) + } + w.mu.Unlock() + + // Convert to generic file event mask. + var mask int + if kevent.Fflags&unix.NOTE_WRITE != 0 { + mask |= FileEventModified + } + if kevent.Fflags&unix.NOTE_DELETE != 0 { + mask |= FileEventDeleted + } + + // Send event to channel or wait for close. + select { + case <-ctx.Done(): + return ctx.Err() + case w.events <- FileEvent{ + Name: filename, + Mask: mask, + }: + return nil + } +} diff --git a/internal/file_watcher_linux.go b/internal/file_watcher_linux.go new file mode 100644 index 00000000..07358750 --- /dev/null +++ b/internal/file_watcher_linux.go @@ -0,0 +1,369 @@ +//go:build linux + +package internal + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "sync" + "time" + "unsafe" + + "golang.org/x/sync/errgroup" + "golang.org/x/sys/unix" +) + +var _ FileWatcher = (*InotifyFileWatcher)(nil) + +// InotifyFileWatcher watches files and is notified of events on them. +// +// Watcher code based on https://github.com/fsnotify/fsnotify +type InotifyFileWatcher struct { + inotify struct { + fd int + buf []byte + } + epoll struct { + fd int // epoll_create1() file descriptor + events []unix.EpollEvent + } + pipe struct { + r int // read pipe file descriptor + w int // write pipe file descriptor + } + + events chan FileEvent + + mu sync.Mutex + watches map[string]int + paths map[int]string + notExists map[string]struct{} + + g errgroup.Group + ctx context.Context + cancel func() +} + +// NewInotifyFileWatcher returns a new instance of InotifyFileWatcher. +func NewInotifyFileWatcher() *InotifyFileWatcher { + w := &InotifyFileWatcher{ + events: make(chan FileEvent), + + watches: make(map[string]int), + paths: make(map[int]string), + notExists: make(map[string]struct{}), + } + + w.inotify.buf = make([]byte, 4096*unix.SizeofInotifyEvent) + w.epoll.events = make([]unix.EpollEvent, 64) + + return w +} + +// NewFileWatcher returns an instance of InotifyFileWatcher on Linux systems. +func NewFileWatcher() FileWatcher { + return NewInotifyFileWatcher() +} + +// Events returns a read-only channel of file events. +func (w *InotifyFileWatcher) Events() <-chan FileEvent { + return w.events +} + +// Open initializes the watcher and begins listening for file events. +func (w *InotifyFileWatcher) Open() (err error) { + w.inotify.fd, err = unix.InotifyInit1(unix.IN_CLOEXEC) + if err != nil { + return fmt.Errorf("cannot init inotify: %w", err) + } + + // Initialize epoll and create a non-blocking pipe. 
+ if w.epoll.fd, err = unix.EpollCreate1(unix.EPOLL_CLOEXEC); err != nil { + return fmt.Errorf("cannot create epoll: %w", err) + } + + pipe := []int{-1, -1} + if err := unix.Pipe2(pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC); err != nil { + return fmt.Errorf("cannot create epoll pipe: %w", err) + } + w.pipe.r, w.pipe.w = pipe[0], pipe[1] + + // Register inotify fd with epoll + if err := unix.EpollCtl(w.epoll.fd, unix.EPOLL_CTL_ADD, w.inotify.fd, &unix.EpollEvent{ + Fd: int32(w.inotify.fd), + Events: unix.EPOLLIN, + }); err != nil { + return fmt.Errorf("cannot add inotify to epoll: %w", err) + } + + // Register pipe fd with epoll + if err := unix.EpollCtl(w.epoll.fd, unix.EPOLL_CTL_ADD, w.pipe.r, &unix.EpollEvent{ + Fd: int32(w.pipe.r), + Events: unix.EPOLLIN, + }); err != nil { + return fmt.Errorf("cannot add pipe to epoll: %w", err) + } + + w.ctx, w.cancel = context.WithCancel(context.Background()) + w.g.Go(func() error { + if err := w.monitor(w.ctx); err != nil && w.ctx.Err() == nil { + return err + } + return nil + }) + w.g.Go(func() error { + if err := w.monitorNotExists(w.ctx); err != nil && w.ctx.Err() == nil { + return err + } + return nil + }) + + return nil +} + +// Close stops watching for file events and cleans up resources. +func (w *InotifyFileWatcher) Close() (err error) { + w.cancel() + + if e := w.wake(); e != nil && err == nil { + err = e + } + if e := w.g.Wait(); e != nil && err == nil { + err = e + } + return err +} + +// Watch begins watching the given file or directory. +func (w *InotifyFileWatcher) Watch(filename string) error { + w.mu.Lock() + defer w.mu.Unlock() + + filename = filepath.Clean(filename) + + // If file doesn't exist, monitor separately until it does exist as we + // can't watch non-existent files with inotify. + if _, err := os.Stat(filename); os.IsNotExist(err) { + w.notExists[filename] = struct{}{} + return nil + } + + return w.addWatch(filename) +} + +func (w *InotifyFileWatcher) addWatch(filename string) error { + wd, err := unix.InotifyAddWatch(w.inotify.fd, filename, unix.IN_MODIFY|unix.IN_DELETE_SELF) + if err != nil { + return err + } + + w.watches[filename] = wd + w.paths[wd] = filename + + delete(w.notExists, filename) + + return err +} + +// Unwatch stops watching the given file or directory. +func (w *InotifyFileWatcher) Unwatch(filename string) error { + w.mu.Lock() + defer w.mu.Unlock() + + filename = filepath.Clean(filename) + + // Look up watch ID by filename. + wd, ok := w.watches[filename] + if !ok { + return nil + } + + if _, err := unix.InotifyRmWatch(w.inotify.fd, uint32(wd)); err != nil { + return err + } + + delete(w.paths, wd) + delete(w.watches, filename) + delete(w.notExists, filename) + + return nil +} + +// monitorNotExist runs in a separate goroutine and monitors for the creation of +// watched files that do not yet exist. +func (w *InotifyFileWatcher) monitorNotExists(ctx context.Context) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + w.checkNotExists(ctx) + } + } +} + +func (w *InotifyFileWatcher) checkNotExists(ctx context.Context) { + w.mu.Lock() + defer w.mu.Unlock() + + for filename := range w.notExists { + if _, err := os.Stat(filename); os.IsNotExist(err) { + continue + } + + if err := w.addWatch(filename); err != nil { + log.Printf("non-existent file monitor: cannot add watch: %s", err) + continue + } + + // Send event to channel. 
+ select { + case w.events <- FileEvent{ + Name: filename, + Mask: FileEventCreated, + }: + default: + } + } +} + +// monitor runs in a separate goroutine and monitors the inotify event queue. +func (w *InotifyFileWatcher) monitor(ctx context.Context) error { + // Close all file descriptors once monitor exits. + defer func() { + unix.Close(w.inotify.fd) + unix.Close(w.epoll.fd) + unix.Close(w.pipe.w) + unix.Close(w.pipe.r) + }() + + for { + if err := w.wait(ctx); err != nil { + return err + } else if err := w.read(ctx); err != nil { + return err + } + } +} + +// read reads from the inotify file descriptor. Automatically rety on EINTR. +func (w *InotifyFileWatcher) read(ctx context.Context) error { + for { + n, err := unix.Read(w.inotify.fd, w.inotify.buf) + if err != nil && err != unix.EINTR { + return err + } else if n < 0 { + continue + } + + return w.recv(ctx, w.inotify.buf[:n]) + } +} + +func (w *InotifyFileWatcher) recv(ctx context.Context, b []byte) error { + if err := ctx.Err(); err != nil { + return err + } + + for { + if len(b) == 0 { + return nil + } else if len(b) < unix.SizeofInotifyEvent { + return fmt.Errorf("InotifyFileWatcher.recv(): inotify short record: n=%d", len(b)) + } + + event := (*unix.InotifyEvent)(unsafe.Pointer(&b[0])) + if event.Mask&unix.IN_Q_OVERFLOW != 0 { + // TODO: Change to notify all watches. + return ErrFileEventQueueOverflow + } + + // Remove deleted files from the lookups. + w.mu.Lock() + name, ok := w.paths[int(event.Wd)] + if ok && event.Mask&unix.IN_DELETE_SELF != 0 { + delete(w.paths, int(event.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + //if nameLen > 0 { + // // Point "bytes" at the first byte of the filename + // bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // // The filename is padded with NULL bytes. TrimRight() gets rid of those. + // name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + //} + + // Move to next event. + b = b[unix.SizeofInotifyEvent+event.Len:] + + // Skip event if ignored. + if event.Mask&unix.IN_IGNORED != 0 { + continue + } + + // Convert to generic file event mask. + var mask int + if event.Mask&unix.IN_MODIFY != 0 { + mask |= FileEventModified + } + if event.Mask&unix.IN_DELETE_SELF != 0 { + mask |= FileEventDeleted + } + + // Send event to channel or wait for close. + select { + case <-ctx.Done(): + return ctx.Err() + case w.events <- FileEvent{ + Name: name, + Mask: mask, + }: + } + } +} + +func (w *InotifyFileWatcher) wait(ctx context.Context) error { + for { + n, err := unix.EpollWait(w.epoll.fd, w.epoll.events, -1) + if n == 0 || err == unix.EINTR { + continue + } else if err != nil { + return err + } + + // Read events to see if we have data available on inotify or if we are awaken. + var hasData bool + for _, event := range w.epoll.events[:n] { + switch event.Fd { + case int32(w.inotify.fd): // inotify file descriptor + hasData = hasData || event.Events&(unix.EPOLLHUP|unix.EPOLLERR|unix.EPOLLIN) != 0 + + case int32(w.pipe.r): // epoll file descriptor + if _, err := unix.Read(w.pipe.r, make([]byte, 1024)); err != nil && err != unix.EAGAIN { + return fmt.Errorf("epoll pipe error: %w", err) + } + } + } + + // Check if context is closed and then exit if data is available. 
+ if err := ctx.Err(); err != nil { + return err + } else if hasData { + return nil + } + } +} + +func (w *InotifyFileWatcher) wake() error { + if _, err := unix.Write(w.pipe.w, []byte{0}); err != nil && err != unix.EAGAIN { + return err + } + return nil +} diff --git a/internal/file_watcher_test.go b/internal/file_watcher_test.go new file mode 100644 index 00000000..dd767154 --- /dev/null +++ b/internal/file_watcher_test.go @@ -0,0 +1,211 @@ +package internal_test + +import ( + "database/sql" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream/internal" + _ "github.com/mattn/go-sqlite3" +) + +func TestFileWatcher(t *testing.T) { + t.Run("WriteAndRemove", func(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "db") + + w := internal.NewFileWatcher() + if err := w.Open(); err != nil { + t.Fatal(err) + } + defer w.Close() + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { + t.Fatal(err) + } + + if err := w.Watch(dbPath + "-wal"); err != nil { + t.Fatal(err) + } + + // Write to the WAL file & ensure a "modified" event occurs. + if _, err := db.Exec(`INSERT INTO t (x) VALUES (1)`); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + case event := <-w.Events(): + if got, want := event.Name, dbPath+"-wal"; got != want { + t.Fatalf("name=%s, want %s", got, want) + } else if got, want := event.Mask, internal.FileEventModified; got != want { + t.Fatalf("mask=0x%02x, want 0x%02x", got, want) + } + } + + // Flush any duplicate events. + drainFileEventChannel(w.Events()) + + // Close database and ensure checkpointed WAL creates a "delete" event. + if err := db.Close(); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + case event := <-w.Events(): + if got, want := event.Name, dbPath+"-wal"; got != want { + t.Fatalf("name=%s, want %s", got, want) + } else if got, want := event.Mask, internal.FileEventDeleted; got != want { + t.Fatalf("mask=0x%02x, want 0x%02x", got, want) + } + } + }) + + t.Run("LargeTx", func(t *testing.T) { + w := internal.NewFileWatcher() + if err := w.Open(); err != nil { + t.Fatal(err) + } + defer w.Close() + + dbPath := filepath.Join(t.TempDir(), "db") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatal(err) + } else if _, err := db.Exec(`PRAGMA cache_size = 4`); err != nil { + t.Fatal(err) + } else if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + if err := w.Watch(dbPath + "-wal"); err != nil { + t.Fatal(err) + } + + // Start a transaction to ensure writing large data creates multiple write events. + tx, err := db.Begin() + if err != nil { + t.Fatal(err) + } + defer func() { _ = tx.Rollback() }() + + // Write enough data to require a spill. + for i := 0; i < 100; i++ { + if _, err := tx.Exec(`INSERT INTO t (x) VALUES (?)`, strings.Repeat("x", 512)); err != nil { + t.Fatal(err) + } + } + + // Ensure spill writes to disk. 
+ select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + case event := <-w.Events(): + if got, want := event.Name, dbPath+"-wal"; got != want { + t.Fatalf("name=%s, want %s", got, want) + } else if got, want := event.Mask, internal.FileEventModified; got != want { + t.Fatalf("mask=0x%02x, want 0x%02x", got, want) + } + } + + // Flush any duplicate events. + drainFileEventChannel(w.Events()) + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Final commit should spill remaining pages and cause another write event. + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + case event := <-w.Events(): + if got, want := event.Name, dbPath+"-wal"; got != want { + t.Fatalf("name=%s, want %s", got, want) + } else if got, want := event.Mask, internal.FileEventModified; got != want { + t.Fatalf("mask=0x%02x, want 0x%02x", got, want) + } + } + }) + + t.Run("WatchBeforeCreate", func(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "db") + + w := internal.NewFileWatcher() + if err := w.Open(); err != nil { + t.Fatal(err) + } + defer w.Close() + + if err := w.Watch(dbPath); err != nil { + t.Fatal(err) + } else if err := w.Watch(dbPath + "-wal"); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { + t.Fatal(err) + } + + // Wait for main database creation event. + waitForFileEvent(t, w.Events(), internal.FileEvent{Name: dbPath, Mask: internal.FileEventCreated}) + + // Write to the WAL file & ensure a "modified" event occurs. + if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.Exec(`INSERT INTO t (x) VALUES (1)`); err != nil { + t.Fatal(err) + } + + // Wait for WAL creation event. + waitForFileEvent(t, w.Events(), internal.FileEvent{Name: dbPath + "-wal", Mask: internal.FileEventCreated}) + }) +} + +func drainFileEventChannel(ch <-chan internal.FileEvent) { + for { + select { + case <-time.After(100 * time.Millisecond): + return + case <-ch: + } + } +} + +func waitForFileEvent(tb testing.TB, ch <-chan internal.FileEvent, want internal.FileEvent) { + tb.Helper() + + timeout := time.After(10 * time.Second) + + for { + select { + case <-timeout: + tb.Fatalf("timeout waiting for event: %#v", want) + case got := <-ch: + if got == want { + return + } + } + } +} diff --git a/internal/internal.go b/internal/internal.go index b2db3b98..681726a9 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -239,7 +239,7 @@ func TruncateDuration(d time.Duration) time.Duration { return d } -// MD5hash returns a hex-encoded MD5 hash of b. -func MD5hash(b []byte) string { +// MD5Hash returns a hex-encoded MD5 hash of b. +func MD5Hash(b []byte) string { return fmt.Sprintf("%x", md5.Sum(b)) } diff --git a/replica.go b/replica.go index 95b30e5a..e401d8c2 100644 --- a/replica.go +++ b/replica.go @@ -662,7 +662,7 @@ func (r *Replica) monitor(ctx context.Context) { } // Fetch new notify channel before replicating data. - notify = r.db.Notify() + notify = r.db.WALNotify() // Synchronize the shadow wal into the replication directory. 
if err := r.Sync(ctx); err != nil { diff --git a/server.go b/server.go new file mode 100644 index 00000000..501e81eb --- /dev/null +++ b/server.go @@ -0,0 +1,186 @@ +package litestream + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/benbjohnson/litestream/internal" + "golang.org/x/sync/errgroup" +) + +// Server represents the top-level container. +// It manage databases and routes global file system events. +type Server struct { + mu sync.Mutex + dbs map[string]*DB // databases by path + watcher internal.FileWatcher + + ctx context.Context + cancel func() + errgroup errgroup.Group +} + +// NewServer returns a new instance of Server. +func NewServer() *Server { + return &Server{ + dbs: make(map[string]*DB), + } +} + +// Open initializes the server and begins watching for file system events. +func (s *Server) Open() error { + s.watcher = internal.NewFileWatcher() + if err := s.watcher.Open(); err != nil { + return err + } + + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.errgroup.Go(func() error { + if err := s.monitor(s.ctx); err != nil && err != context.Canceled { + return fmt.Errorf("server monitor error: %w", err) + } + return nil + }) + return nil +} + +// Close shuts down the server and all databases it manages. +func (s *Server) Close() (err error) { + // Cancel context and wait for goroutines to finish. + s.cancel() + if e := s.errgroup.Wait(); e != nil && err == nil { + err = e + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.watcher != nil { + if e := s.watcher.Close(); e != nil && err == nil { + err = fmt.Errorf("close watcher: %w", e) + } + } + + for _, db := range s.dbs { + if e := db.Close(); e != nil && err == nil { + err = fmt.Errorf("close db: path=%s err=%w", db.Path(), e) + } + } + s.dbs = make(map[string]*DB) + + return err +} + +// DB returns the database with the given path, if it's managed by the server. +func (s *Server) DB(path string) *DB { + s.mu.Lock() + defer s.mu.Unlock() + return s.dbs[path] +} + +// DBs returns a slice of all databases managed by the server. +func (s *Server) DBs() []*DB { + s.mu.Lock() + defer s.mu.Unlock() + + a := make([]*DB, 0, len(s.dbs)) + for _, db := range s.dbs { + a = append(a, db) + } + return a +} + +// Watch adds a database path to be managed by the server. +func (s *Server) Watch(path string, fn func(path string) (*DB, error)) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Instantiate DB from factory function. + db, err := fn(path) + if err != nil { + return fmt.Errorf("new database: %w", err) + } + + // Start watching the database for changes. + if err := db.Open(); err != nil { + return fmt.Errorf("open database: %w", err) + } + s.dbs[path] = db + + // Watch for changes on the database file & WAL. + if err := s.watcher.Watch(path); err != nil { + return fmt.Errorf("watch db file: %w", err) + } else if err := s.watcher.Watch(path + "-wal"); err != nil { + return fmt.Errorf("watch wal file: %w", err) + } + + // Kick off an initial sync. + select { + case db.NotifyCh() <- struct{}{}: + default: + } + + return nil +} + +// Unwatch removes a database path from being managed by the server. +func (s *Server) Unwatch(path string) error { + s.mu.Lock() + defer s.mu.Unlock() + + db := s.dbs[path] + if db == nil { + return nil + } + delete(s.dbs, path) + + // Stop watching for changes on the database WAL. + if err := s.watcher.Unwatch(path + "-wal"); err != nil { + return fmt.Errorf("unwatch file: %w", err) + } + + // Shut down database. 
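+ // The entry was already removed from s.dbs above, so the event dispatcher can
+ // no longer route notifications to this DB while it shuts down.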
+ if err := db.Close(); err != nil { + return fmt.Errorf("close db: %w", err) + } + + return nil +} + +// monitor runs in a separate goroutine and dispatches notifications to managed DBs. +func (s *Server) monitor(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case event := <-s.watcher.Events(): + if err := s.dispatchFileEvent(ctx, event); err != nil { + return err + } + } + } +} + +// dispatchFileEvent dispatches a notification to the database which owns the file. +func (s *Server) dispatchFileEvent(ctx context.Context, event internal.FileEvent) error { + path := event.Name + path = strings.TrimSuffix(path, "-wal") + + db := s.DB(path) + if db == nil { + return nil + } + + // TODO: If deleted, remove from server and close DB. + + select { + case <-ctx.Done(): + return ctx.Err() + case db.NotifyCh() <- struct{}{}: + return nil // notify db + default: + return nil // already pending notification, skip + } +} diff --git a/server_test.go b/server_test.go new file mode 100644 index 00000000..3d7601f0 --- /dev/null +++ b/server_test.go @@ -0,0 +1 @@ +package litestream_test From 76e53dc6ea3599ab5c7808e940ba6edbf97b5963 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 6 Feb 2022 11:25:40 -0700 Subject: [PATCH 49/95] Remove built-in validation option Previously, Litestream had a validator that worked most of the time but also caused some false positives. It is difficult to provide validation from with Litestream without controlling outside processes that can also affect the database. As such, validation has been moved out to the external CI test runner which provides a more consistent validation process. --- db.go | 45 -------------- db_test.go | 45 -------------- replica.go | 175 +---------------------------------------------------- 3 files changed, 1 insertion(+), 264 deletions(-) diff --git a/db.go b/db.go index 84acd616..5ebacc2b 100644 --- a/db.go +++ b/db.go @@ -8,7 +8,6 @@ import ( "encoding/hex" "errors" "fmt" - "hash/crc64" "io" "io/ioutil" "log" @@ -1421,50 +1420,6 @@ func ApplyWAL(ctx context.Context, dbPath, walPath string) error { return d.Close() } -// CRC64 returns a CRC-64 ISO checksum of the database and its current position. -// -// This function obtains a read lock so it prevents syncs from occurring until -// the operation is complete. The database will still be usable but it will be -// unable to checkpoint during this time. -// -// If dst is set, the database file is copied to that location before checksum. -func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) { - db.mu.Lock() - defer db.mu.Unlock() - - if err := db.init(); err != nil { - return 0, Pos{}, err - } else if db.db == nil { - return 0, Pos{}, os.ErrNotExist - } - - generation, err := db.CurrentGeneration() - if err != nil { - return 0, Pos{}, fmt.Errorf("cannot find current generation: %w", err) - } else if generation == "" { - return 0, Pos{}, fmt.Errorf("no current generation") - } - - // Force a RESTART checkpoint to ensure the database is at the start of the WAL. - if err := db.checkpoint(ctx, generation, CheckpointModeRestart); err != nil { - return 0, Pos{}, err - } - - // Obtain current position. Clear the offset since we are only reading the - // DB and not applying the current WAL. - pos := db.pos - pos.Offset = 0 - - // Seek to the beginning of the db file descriptor and checksum whole file. 
- h := crc64.New(crc64.MakeTable(crc64.ISO)) - if _, err := db.f.Seek(0, io.SeekStart); err != nil { - return 0, pos, err - } else if _, err := io.Copy(h, db.f); err != nil { - return 0, pos, err - } - return h.Sum64(), pos, nil -} - // ReadWALFields iterates over the header & frames in the WAL data in r. // Returns salt, checksum, byte order & the last frame. WAL data must start // from the beginning of the WAL header and must end on either the WAL header diff --git a/db_test.go b/db_test.go index f424fc44..6f54ea8d 100644 --- a/db_test.go +++ b/db_test.go @@ -112,51 +112,6 @@ func TestDB_UpdatedAt(t *testing.T) { }) } -// Ensure we can compute a checksum on the real database. -func TestDB_CRC64(t *testing.T) { - t.Run("ErrNotExist", func(t *testing.T) { - db := MustOpenDB(t) - defer MustCloseDB(t, db) - if _, _, err := db.CRC64(context.Background()); !os.IsNotExist(err) { - t.Fatalf("unexpected error: %#v", err) - } - }) - - t.Run("DB", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - chksum0, _, err := db.CRC64(context.Background()) - if err != nil { - t.Fatal(err) - } - - // Issue change that is applied to the WAL. Checksum should not change. - if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil { - t.Fatal(err) - } else if chksum1, _, err := db.CRC64(context.Background()); err != nil { - t.Fatal(err) - } else if chksum0 == chksum1 { - t.Fatal("expected different checksum event after WAL change") - } - - // Checkpoint change into database. Checksum should change. - if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil { - t.Fatal(err) - } - - if chksum2, _, err := db.CRC64(context.Background()); err != nil { - t.Fatal(err) - } else if chksum0 == chksum2 { - t.Fatal("expected different checksums after checkpoint") - } - }) -} - // Ensure we can sync the real WAL to the shadow WAL. func TestDB_Sync(t *testing.T) { // Ensure sync is skipped if no database exists. diff --git a/replica.go b/replica.go index e401d8c2..b92b4b4e 100644 --- a/replica.go +++ b/replica.go @@ -3,12 +3,10 @@ package litestream import ( "context" "fmt" - "hash/crc64" "io" "io/ioutil" "log" "os" - "path/filepath" "sort" "sync" "time" @@ -118,11 +116,10 @@ func (r *Replica) Start(ctx context.Context) { ctx, r.cancel = context.WithCancel(ctx) // Start goroutine to replicate data. - r.wg.Add(4) + r.wg.Add(3) go func() { defer r.wg.Done(); r.monitor(ctx) }() go func() { defer r.wg.Done(); r.retainer(ctx) }() go func() { defer r.wg.Done(); r.snapshotter(ctx) }() - go func() { defer r.wg.Done(); r.validator(ctx) }() } // Stop cancels any outstanding replication and blocks until finished. @@ -144,15 +141,6 @@ func (r *Replica) Stop(hard bool) (err error) { return err } -// logPrefix returns the prefix used when logging from the replica. -// This includes the replica name as well as the database path, if available. -func (r *Replica) logPrefix() string { - if db := r.DB(); db != nil { - return fmt.Sprintf("%s(%s): ", db.Path(), r.Name()) - } - return r.Name() + ": " -} - // Sync copies new WAL frames from the shadow WAL to the replica client. func (r *Replica) Sync(ctx context.Context) (err error) { // Clear last position if if an error occurs during sync. @@ -723,160 +711,6 @@ func (r *Replica) snapshotter(ctx context.Context) { } } -// validator runs in a separate goroutine and handles periodic validation. 
-func (r *Replica) validator(ctx context.Context) { - // Initialize counters since validation occurs infrequently. - for _, status := range []string{"ok", "error"} { - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), status).Add(0) - } - - // Exit validation if interval is not set. - if r.ValidationInterval <= 0 { - return - } - - ticker := time.NewTicker(r.ValidationInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if err := r.Validate(ctx); err != nil { - r.Logger.Printf("validation error: %s", err) - continue - } - } - } -} - -// Validate restores the most recent data from a replica and validates -// that the resulting database matches the current database. -func (r *Replica) Validate(ctx context.Context) error { - db := r.DB() - - // Restore replica to a temporary directory. - tmpdir, err := ioutil.TempDir("", "*-litestream") - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - // Compute checksum of primary database under lock. This prevents a - // sync from occurring and the database will not be written. - chksum0, pos, err := db.CRC64(ctx) - if err != nil { - return fmt.Errorf("cannot compute checksum: %w", err) - } - - // Wait until replica catches up to position. - if err := r.waitForReplica(ctx, pos); err != nil { - return fmt.Errorf("cannot wait for replica: %w", err) - } - - // Find lastest snapshot that occurs before the index. - snapshotIndex, err := FindSnapshotForIndex(ctx, r.client, pos.Generation, pos.Index-1) - if err != nil { - return fmt.Errorf("cannot find snapshot index: %w", err) - } - - restorePath := filepath.Join(tmpdir, "replica") - opt := RestoreOptions{ - Logger: log.New(os.Stderr, "", 0), - LogPrefix: r.logPrefix(), - } - if err := Restore(ctx, r.client, restorePath, pos.Generation, snapshotIndex, pos.Index-1, opt); err != nil { - return fmt.Errorf("cannot restore: %w", err) - } - - // Open file handle for restored database. - // NOTE: This open is ok as the restored database is not managed by litestream. - f, err := os.Open(restorePath) - if err != nil { - return err - } - defer f.Close() - - // Read entire file into checksum. - h := crc64.New(crc64.MakeTable(crc64.ISO)) - if _, err := io.Copy(h, f); err != nil { - return err - } - chksum1 := h.Sum64() - - status := "ok" - mismatch := chksum0 != chksum1 - if mismatch { - status = "mismatch" - } - r.Logger.Printf("validator: status=%s db=%016x replica=%016x pos=%s", status, chksum0, chksum1, pos) - - // Validate checksums match. - if mismatch { - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), "error").Inc() - return ErrChecksumMismatch - } - - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), "ok").Inc() - - if err := os.RemoveAll(tmpdir); err != nil { - return fmt.Errorf("cannot remove temporary validation directory: %w", err) - } - return nil -} - -// waitForReplica blocks until replica reaches at least the given position. -func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error { - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTicker(10 * time.Second) - defer ticker.Stop() - - once := make(chan struct{}, 1) - once <- struct{}{} - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-timer.C: - return fmt.Errorf("replica wait exceeded timeout") - case <-ticker.C: - case <-once: // immediate on first check - } - - // Obtain current position of replica, check if past target position. 
- curr := r.Pos() - if curr.IsZero() { - r.Logger.Printf("validator: no replica position available") - continue - } - - // Exit if the generation has changed while waiting as there will be - // no further progress on the old generation. - if curr.Generation != pos.Generation { - return fmt.Errorf("generation changed") - } - - ready := true - if curr.Index < pos.Index { - ready = false - } else if curr.Index == pos.Index && curr.Offset < pos.Offset { - ready = false - } - - // If not ready, restart loop. - if !ready { - continue - } - - // Current position at or after target position. - return nil - } -} - // GenerationCreatedAt returns the earliest creation time of any snapshot. // Returns zero time if no snapshots exist. func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (time.Time, error) { @@ -970,11 +804,4 @@ var ( Name: "wal_offset", Help: "The current WAL offset", }, []string{"db", "name"}) - - replicaValidationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "litestream", - Subsystem: "replica", - Name: "validation_total", - Help: "The number of validations performed", - }, []string{"db", "name", "status"}) ) From 30a8d07a81c5afba96ce3df281ded2a2fffc954f Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 6 Feb 2022 11:17:31 -0700 Subject: [PATCH 50/95] Add WAL overrun validation Under high write load, it is possible for write transactions from another process to overrun the WAL between the time when Litestream performs a RESTART checkpoint and when it obtains the write lock immediately after. This change adds validation that an overrun has not occurred and, if it has, it will start a new generation. --- db.go | 94 +++++++++++++++++++++++++++++++++++++++++++++++---- litestream.go | 2 +- replica.go | 18 +++++----- 3 files changed, 97 insertions(+), 17 deletions(-) diff --git a/db.go b/db.go index 5ebacc2b..791d80d9 100644 --- a/db.go +++ b/db.go @@ -34,7 +34,7 @@ const ( // MonitorDelayInterval is the time Litestream will wait after receiving a file // change notification before processing the WAL file for changes. -const MonitorDelayInterval = 100 * time.Millisecond +const MonitorDelayInterval = 10 * time.Millisecond // MaxIndex is the maximum possible WAL index. // If this index is reached then a new generation will be started. @@ -422,14 +422,20 @@ func (db *DB) Close() (err error) { } } - // Ensure replicas perform a final sync and stop replicating. + // Ensure replicas stop replicating and perform a final sync. for _, r := range db.Replicas { + // Stop normal background sync. + r.Stop() + + // Force one final sync if DB is open. if db.db != nil { if e := r.Sync(ctx); e != nil && err == nil { err = e } } - if e := r.Stop(true); e != nil && err == nil { + + // Close out replica. + if e := r.Close(); e != nil && err == nil { err = e } } @@ -795,10 +801,25 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { } // Sync copies pending data from the WAL to the shadow WAL. -func (db *DB) Sync(ctx context.Context) (err error) { - db.mu.Lock() - defer db.mu.Unlock() +func (db *DB) Sync(ctx context.Context) error { + const retryN = 5 + + for i := 0; i < retryN; i++ { + if err := func() error { + db.mu.Lock() + defer db.mu.Unlock() + return db.sync(ctx) + }(); err != nil { + db.Logger.Printf("sync error, retrying: %s", err) + } else { + break + } + } + return nil + +} +func (db *DB) sync(ctx context.Context) (err error) { // Initialize database, if necessary. Exit if no DB exists. 
if err := db.init(); err != nil { return err @@ -889,7 +910,16 @@ func (db *DB) Sync(ctx context.Context) (err error) { // Issue the checkpoint. if checkpoint { - if err := db.checkpoint(ctx, info.generation, checkpointMode); err != nil { + // Under rare circumstances, a checkpoint can be unable to verify continuity + // and will require a restart. + if err := db.checkpoint(ctx, info.generation, checkpointMode); errors.Is(err, errRestartGeneration) { + generation, err := db.createGeneration(ctx) + if err != nil { + return fmt.Errorf("create generation: %w", err) + } + db.Logger.Printf("sync: new generation %q, possible WAL overrun occurred", generation) + + } else if err != nil { return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err) } } @@ -1174,6 +1204,37 @@ func (db *DB) copyToShadowWAL(ctx context.Context) error { return nil } +// verifyLastShadowFrame re-reads the last frame read during the shadow copy. +// This ensures that the frame has not been overrun after a checkpoint occurs +// but before the new write lock has been obtained to initialize the new wal index. +func (db *DB) verifyLastShadowFrame(ctx context.Context) error { + // Skip if we don't have a previous frame to verify. + if db.frame == nil { + return nil + } + + r, err := os.Open(db.WALPath()) + if err != nil { + return err + } + defer r.Close() + + // Seek to position of where the last frame was read. + buf := make([]byte, len(db.frame)) + if _, err := r.Seek(db.pos.Offset-int64(len(db.frame)), io.SeekStart); err != nil { + return fmt.Errorf("seek to last frame: %w", err) + } else if _, err := io.ReadFull(r, buf); err != nil { + return fmt.Errorf("read last frame: %w", err) + } + + // Return a marker error if frames do not match. + if !bytes.Equal(db.frame, buf) { + return errRestartGeneration + } + + return nil +} + // WALSegmentReader returns a reader for a section of WAL data at the given position. // Returns os.ErrNotExist if no matching index/offset is found. func (db *DB) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { @@ -1304,6 +1365,16 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { return fmt.Errorf("_litestream_lock: %w", err) } + // Verify we can re-read the last frame copied to the shadow WAL. + // This ensures that another transaction has not overrun the WAL past where + // our previous copy was which would overwrite any additional unread + // frames between the checkpoint & the new write lock. + // + // This only occurs with high load and a short sync frequency so it is rare. + if err := db.verifyLastShadowFrame(ctx); err != nil { + return fmt.Errorf("cannot verify last frame copied from shadow wal: %w", err) + } + // Copy the end of the previous WAL before starting a new shadow WAL. if err := db.copyToShadowWAL(ctx); err != nil { return fmt.Errorf("cannot copy to end of shadow wal: %w", err) @@ -1360,6 +1431,11 @@ func (db *DB) execCheckpoint(mode string) (err error) { } db.Logger.Printf("checkpoint(%s): [%d,%d,%d]", mode, row[0], row[1], row[2]) + // Clear last read frame if we are truncating. + if mode == CheckpointModeTruncate { + db.frame = nil + } + // Reacquire the read lock immediately after the checkpoint. if err := db.acquireReadLock(); err != nil { return fmt.Errorf("reacquire read lock: %w", err) @@ -1543,3 +1619,7 @@ func logPrefixPath(path string) string { } return path } + +// A marker error to indicate that a restart checkpoint could not verify +// continuity between WAL indices and a new generation should be started. 
+var errRestartGeneration = errors.New("restart generation") diff --git a/litestream.go b/litestream.go index 6cc3f16b..56144573 100644 --- a/litestream.go +++ b/litestream.go @@ -302,7 +302,7 @@ func (p Pos) String() string { if p.IsZero() { return "" } - return fmt.Sprintf("%s/%08x:%d", p.Generation, p.Index, p.Offset) + return fmt.Sprintf("%s/%08x:%08x", p.Generation, p.Index, p.Offset) } // IsZero returns true if p is the zero value. diff --git a/replica.go b/replica.go index b92b4b4e..cc5652d1 100644 --- a/replica.go +++ b/replica.go @@ -110,7 +110,7 @@ func (r *Replica) Start(ctx context.Context) { } // Stop previous replication. - _ = r.Stop(false) + r.Stop() // Wrap context with cancelation. ctx, r.cancel = context.WithCancel(ctx) @@ -123,17 +123,17 @@ func (r *Replica) Start(ctx context.Context) { } // Stop cancels any outstanding replication and blocks until finished. -// -// Performing a hard stop will close the DB file descriptor which could release -// locks on per-process locks. Hard stops should only be performed when -// stopping the entire process. -func (r *Replica) Stop(hard bool) (err error) { +func (r *Replica) Stop() { r.cancel() r.wg.Wait() +} +// Close will close the DB file descriptor which could release locks on +// per-process locks (e.g. non-Linux OSes). +func (r *Replica) Close() (err error) { r.muf.Lock() defer r.muf.Unlock() - if hard && r.f != nil { + if r.f != nil { if e := r.f.Close(); e != nil && err == nil { err = e } @@ -297,9 +297,9 @@ func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentI // Flush LZ4 writer, close pipe, and wait for write to finish. if err := zw.Close(); err != nil { - return err + return fmt.Errorf("lz4 writer close: %w", err) } else if err := pw.Close(); err != nil { - return err + return fmt.Errorf("pipe writer close: %w", err) } else if err := g.Wait(); err != nil { return err } From 54f3b94d3f0d1137b12a2801dc686a243176e70c Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 7 Feb 2022 14:09:30 -0700 Subject: [PATCH 51/95] Upgrade dependencies - github.com/aws/aws-sdk-go v1.42.44 => v1.42.48 - cloud.google.com/go/storage v1.19.0 => v1.20.0 - github.com/pierrec/lz4/v4 v4.1.12 => v4.1.14 - google.golang.org/api v0.66.0 => v0.67.0 --- go.mod | 10 +++++----- go.sum | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index a01cc98f..592c596a 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,17 @@ module github.com/benbjohnson/litestream go 1.16 require ( - cloud.google.com/go/storage v1.19.0 + cloud.google.com/go/storage v1.20.0 github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/aws/aws-sdk-go v1.42.44 + github.com/aws/aws-sdk-go v1.42.48 github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.11 - github.com/pierrec/lz4/v4 v4.1.12 + github.com/pierrec/lz4/v4 v4.1.14 github.com/pkg/sftp v1.13.4 github.com/prometheus/client_golang v1.12.1 golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a // indirect - google.golang.org/api v0.66.0 + golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a + google.golang.org/api v0.67.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index cfbb6548..23897fc6 100644 --- a/go.sum +++ b/go.sum @@ -50,8 +50,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= 
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.19.0 h1:XOQSnPJD8hRtZJ3VdCyK0mBZsGGImrzPAMbSWcHSe6Q= -cloud.google.com/go/storage v1.19.0/go.mod h1:6rgiTRjOqI/Zd9YKimub5TIB4d+p3LH33V3ZE1DMuUM= +cloud.google.com/go/storage v1.20.0 h1:kv3rQ3clEQdxqokkCCgQo+bxPqcuXiROjxvnKb8Oqdk= +cloud.google.com/go/storage v1.20.0/go.mod h1:TiC1o6FxNCG8y5gB7rqCsFZCIYPMPZCO81ppOoEPLGI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= @@ -77,8 +77,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go v1.42.44 h1:vPlF4cUsdN5ETfvb7ewZFbFZyB6Rsfndt3kS2XqLXKo= -github.com/aws/aws-sdk-go v1.42.44/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/aws/aws-sdk-go v1.42.48 h1:8ZVBAsA9X2eCpSr/8SrWDk4BOT91wRdqxpAog875+K0= +github.com/aws/aws-sdk-go v1.42.48/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -246,8 +246,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= -github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -483,8 +483,8 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a h1:ppl5mZgokTT8uPkmYOyEUmPTr3ypaKkg5eFOGrAmxxE= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= @@ -589,9 +589,9 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= -google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg= -google.golang.org/api v0.66.0 h1:CbGy4LEiXCVCiNEDFgGpWOVwsDT7E2Qej1ZvN1P7KPg= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= +google.golang.org/api v0.67.0 h1:lYaaLa+x3VVUhtosaK9xihwQ9H9KRa557REHwwZ2orM= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -661,11 +661,11 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q= -google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 h1:0UVUC7VWA/mIU+5a4hVWH6xa234gLcRX8ZcrFKmWWKA= +google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= From 006e4b7155d819dcaa99743cf7bea1f854289ee1 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 8 Feb 2022 12:49:36 -0700 Subject: [PATCH 52/95] Update index & offset encoding Previously, the index & offsets were encoded as 8-character hex strings, however, this limits the maximum value to a `uint32`. 
This is normally not an issue, however, indices could go over the maximum value of 4 billion over time and the offset could exceed this value for an especially large WAL update. For safety, these encodings have been updated to 16-character hex encodings. --- abs/replica_client.go | 2 -- cmd/litestream/main.go | 2 +- cmd/litestream/restore_test.go | 16 +++++----- cmd/litestream/snapshots.go | 4 +-- .../testdata/generations/no-database/Makefile | 4 +-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../testdata/generations/ok/Makefile | 12 ++++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../generations/replica-name/Makefile | 4 +-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../testdata/generations/replica-url/Makefile | 12 ++++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../testdata/restore/latest-replica/Makefile | 6 ++-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../ok/{00000002.db => 0000000000000002.db} | Bin cmd/litestream/testdata/restore/ok/README | 12 ++++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001038.wal.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin cmd/litestream/testdata/snapshots/ok/Makefile | 6 ++-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin cmd/litestream/testdata/snapshots/ok/stdout | 8 ++--- .../testdata/snapshots/replica-name/Makefile | 4 +-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../testdata/snapshots/replica-name/stdout | 4 +-- .../testdata/snapshots/replica-url/Makefile | 6 ++-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../testdata/snapshots/replica-url/stdout | 8 ++--- cmd/litestream/testdata/wal/ok/Makefile | 8 ++--- .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin cmd/litestream/testdata/wal/ok/stdout | 10 +++---- .../testdata/wal/replica-name/Makefile | 8 ++--- .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../testdata/wal/replica-name/stdout | 4 +-- .../testdata/wal/replica-url/Makefile 
| 8 ++--- .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../testdata/wal/replica-url/stdout | 10 +++---- cmd/litestream/wal.go | 6 ++-- db.go | 2 +- file_replica_client.go | 2 +- file_replica_client_test.go | 4 +-- gcs/replica_client.go | 2 -- internal/internal.go | 22 +++++++++----- internal/internal_test.go | 28 ++++++++++-------- litestream.go | 24 +++++++++------ replica.go | 6 ++-- replica_client.go | 6 ++-- replica_client_test.go | 12 ++++---- s3/replica_client.go | 2 -- sftp/replica_client.go | 2 -- testdata/find-latest-generation/ok/Makefile | 10 +++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000bb8.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 00000000000003e8.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000bb8.snapshot.lz4} | Bin testdata/generation-time-bounds/ok/Makefile | 10 +++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../snapshots-only/Makefile | 4 +-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001234.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001234.wal.lz4} | Bin testdata/max-snapshot-index/ok/Makefile | 8 ++--- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 00000000000003e8.snapshot.lz4} | Bin ...shot.lz4 => 00000000000007d0.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001234.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../replica-client-time-bounds/ok/Makefile | 8 ++--- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../{00000000.db => 0000000000000000.db} | Bin testdata/restore/bad-permissions/README | 12 ++++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../ok/{00000002.db => 0000000000000002.db} | Bin testdata/restore/ok/README | 12 ++++---- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001038.wal.lz4} | Bin .../{00000000.db => 0000000000000000.db} | Bin ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin testdata/snapshot-time-bounds/ok/Makefile | 6 ++-- ...shot.lz4 => 0000000000000000.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000001.snapshot.lz4} | Bin ...shot.lz4 => 0000000000000002.snapshot.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin 
.../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001038.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001038.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../ok/{00000000.wal => 0000000000000000.wal} | Bin .../ok/{00000001.wal => 0000000000000001.wal} | Bin .../ok/{00000002.wal => 0000000000000002.wal} | Bin testdata/wal-downloader/ok/README | 12 ++++---- .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin .../0000000000001038.wal.lz4} | Bin .../{00000000.wal => 0000000000000000.wal} | Bin testdata/wal-downloader/one/README | 4 +-- .../0000000000000000.wal.lz4} | Bin .../0000000000002050.wal.lz4} | Bin .../0000000000003068.wal.lz4} | Bin testdata/wal-time-bounds/ok/Makefile | 6 ++-- .../0000000000000000.wal.lz4} | Bin .../0000000000000001.wal.lz4} | Bin .../0000000000000000.wal.lz4} | Bin wal_downloader.go | 6 ++-- wal_downloader_test.go | 26 ++++++++-------- 189 files changed, 203 insertions(+), 197 deletions(-) rename cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename 
cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/ok/{00000002.db => 0000000000000002.db} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/{00000002/00001038.wal.lz4 => 0000000000000002/0000000000001038.wal.lz4} (100%) rename cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 
0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename 
testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/{00000bb8.snapshot.lz4 => 0000000000000bb8.snapshot.lz4} (100%) rename testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/{000003e8.snapshot.lz4 => 00000000000003e8.snapshot.lz4} (100%) rename testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/{00000bb8.snapshot.lz4 => 0000000000000bb8.snapshot.lz4} (100%) rename testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/generation-time-bounds/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/generation-time-bounds/ok/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename testdata/generation-time-bounds/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/max-index/no-wal/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/max-index/no-wal/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/wal/{00000000/00001234.wal.lz4 => 0000000000000000/0000000000001234.wal.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/max-index/ok/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename 
testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/{00000000/00001234.wal.lz4 => 0000000000000000/0000000000001234.wal.lz4} (100%) rename testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/{000003e8.snapshot.lz4 => 00000000000003e8.snapshot.lz4} (100%) rename testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/{000007d0.snapshot.lz4 => 00000000000007d0.snapshot.lz4} (100%) rename testdata/max-wal-index/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/max-wal-index/ok/generations/0000000000000000/wal/{00000000/00001234.wal.lz4 => 0000000000000000/0000000000001234.wal.lz4} (100%) rename testdata/max-wal-index/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/restore/bad-permissions/{00000000.db => 0000000000000000.db} (100%) rename testdata/restore/bad-permissions/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/restore/ok/{00000002.db => 0000000000000002.db} (100%) rename testdata/restore/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename testdata/restore/ok/generations/0000000000000000/wal/{00000002/00001038.wal.lz4 => 0000000000000002/0000000000001038.wal.lz4} (100%) rename testdata/restore/snapshot-only/{00000000.db => 0000000000000000.db} (100%) rename testdata/restore/snapshot-only/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 
0000000000000000.snapshot.lz4} (100%) rename testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/{00000000.snapshot.lz4 => 0000000000000000.snapshot.lz4} (100%) rename testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/{00000001.snapshot.lz4 => 0000000000000001.snapshot.lz4} (100%) rename testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/{00000002.snapshot.lz4 => 0000000000000002.snapshot.lz4} (100%) rename testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/{00000002/00001038.wal.lz4 => 0000000000000002/0000000000001038.wal.lz4} (100%) rename testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/{00000002/00001038.wal.lz4 => 0000000000000002/0000000000001038.wal.lz4} (100%) rename testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/wal-downloader/ok/{00000000.wal => 0000000000000000.wal} (100%) rename testdata/wal-downloader/ok/{00000001.wal => 0000000000000001.wal} (100%) rename testdata/wal-downloader/ok/{00000002.wal => 0000000000000002.wal} (100%) rename 
testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000002/00000000.wal.lz4 => 0000000000000002/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/ok/generations/0000000000000000/wal/{00000002/00001038.wal.lz4 => 0000000000000002/0000000000001038.wal.lz4} (100%) rename testdata/wal-downloader/one/{00000000.wal => 0000000000000000.wal} (100%) rename testdata/wal-downloader/one/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-downloader/one/generations/0000000000000000/wal/{00000000/00002050.wal.lz4 => 0000000000000000/0000000000002050.wal.lz4} (100%) rename testdata/wal-downloader/one/generations/0000000000000000/wal/{00000000/00003068.wal.lz4 => 0000000000000000/0000000000003068.wal.lz4} (100%) rename testdata/wal-time-bounds/ok/generations/0000000000000000/wal/{00000000/00000000.wal.lz4 => 0000000000000000/0000000000000000.wal.lz4} (100%) rename testdata/wal-time-bounds/ok/generations/0000000000000000/wal/{00000000/00000001.wal.lz4 => 0000000000000000/0000000000000001.wal.lz4} (100%) rename testdata/wal-time-bounds/ok/generations/0000000000000000/wal/{00000001/00000000.wal.lz4 => 0000000000000001/0000000000000000.wal.lz4} (100%) diff --git a/abs/replica_client.go b/abs/replica_client.go index 551f638a..394c76a5 100644 --- a/abs/replica_client.go +++ b/abs/replica_client.go @@ -190,8 +190,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N())) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index fa43f4dc..490ba187 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -718,7 +718,7 @@ var _ flag.Value = (*indexVar)(nil) // String returns an 8-character hexadecimal value. func (v *indexVar) String() string { - return fmt.Sprintf("%08x", int(*v)) + return litestream.FormatIndex(int(*v)) } // Set parses s into an integer from a hexadecimal value. diff --git a/cmd/litestream/restore_test.go b/cmd/litestream/restore_test.go index 9469b5a1..6744963c 100644 --- a/cmd/litestream/restore_test.go +++ b/cmd/litestream/restore_test.go @@ -27,10 +27,10 @@ func TestRestoreCommand(t *testing.T) { // STDOUT has timing info so we need to grep per line. 
lines := strings.Split(stdout.String(), "\n") for i, substr := range []string{ - `restoring snapshot 0000000000000000/00000000 to ` + filepath.Join(tempDir, "db.tmp"), - `applied wal 0000000000000000/00000000 elapsed=`, - `applied wal 0000000000000000/00000001 elapsed=`, - `applied wal 0000000000000000/00000002 elapsed=`, + `restoring snapshot 0000000000000000/0000000000000000 to ` + filepath.Join(tempDir, "db.tmp"), + `applied wal 0000000000000000/0000000000000000 elapsed=`, + `applied wal 0000000000000000/0000000000000001 elapsed=`, + `applied wal 0000000000000000/0000000000000002 elapsed=`, `renaming database from temporary location`, } { if !strings.Contains(lines[i], substr) { @@ -54,7 +54,7 @@ func TestRestoreCommand(t *testing.T) { // STDOUT has timing info so we need to grep per line. lines := strings.Split(stdout.String(), "\n") for i, substr := range []string{ - `restoring snapshot 0000000000000001/00000001 to ` + filepath.Join(tempDir, "db.tmp"), + `restoring snapshot 0000000000000001/0000000000000001 to ` + filepath.Join(tempDir, "db.tmp"), `no wal files found, snapshot only`, `renaming database from temporary location`, } { @@ -78,7 +78,7 @@ func TestRestoreCommand(t *testing.T) { lines := strings.Split(stdout.String(), "\n") for i, substr := range []string{ - `restoring snapshot 0000000000000000/00000000 to ` + filepath.Join(tempDir, "db.tmp"), + `restoring snapshot 0000000000000000/0000000000000000 to ` + filepath.Join(tempDir, "db.tmp"), `no wal files found, snapshot only`, `renaming database from temporary location`, } { @@ -102,7 +102,7 @@ func TestRestoreCommand(t *testing.T) { lines := strings.Split(stdout.String(), "\n") for i, substr := range []string{ - `restoring snapshot 0000000000000001/00000000 to ` + filepath.Join(tempDir, "db.tmp"), + `restoring snapshot 0000000000000001/0000000000000000 to ` + filepath.Join(tempDir, "db.tmp"), `no wal files found, snapshot only`, `renaming database from temporary location`, } { @@ -256,7 +256,7 @@ func TestRestoreCommand(t *testing.T) { t.Run("ErrInvalidReplicaURL", func(t *testing.T) { m, _, _, _ := newMain() - err := m.Run(context.Background(), []string{"restore", "-o", "/tmp/db", "xyz://xyz"}) + err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(t.TempDir(), "db"), "xyz://xyz"}) if err == nil || err.Error() != `unknown replica type in config: "xyz"` { t.Fatalf("unexpected error: %s", err) } diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index c274f3e9..c8309bae 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -83,10 +83,10 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (ret error) { fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated") for _, info := range infos { - fmt.Fprintf(w, "%s\t%s\t%08x\t%d\t%s\n", + fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n", info.replicaName, info.Generation, - info.Index, + litestream.FormatIndex(info.Index), info.Size, info.CreatedAt.Format(time.RFC3339), ) diff --git a/cmd/litestream/testdata/generations/no-database/Makefile b/cmd/litestream/testdata/generations/no-database/Makefile index 793e5cd3..61fea093 100644 --- a/cmd/litestream/testdata/generations/no-database/Makefile +++ b/cmd/litestream/testdata/generations/no-database/Makefile @@ -1,4 +1,4 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 
200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/Makefile b/cmd/litestream/testdata/generations/ok/Makefile index 51f53943..d18db159 100644 --- a/cmd/litestream/testdata/generations/ok/Makefile +++ b/cmd/litestream/testdata/generations/ok/Makefile @@ -1,9 +1,9 @@ .PHONY: default default: TZ=UTC touch -ct 200001030000 db - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 
b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/Makefile b/cmd/litestream/testdata/generations/replica-name/Makefile index f6a5eaed..aaab4b13 100644 --- a/cmd/litestream/testdata/generations/replica-name/Makefile +++ b/cmd/litestream/testdata/generations/replica-name/Makefile @@ -1,5 +1,5 @@ .PHONY: default default: TZ=UTC touch -ct 200001030000 db - TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from 
cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/Makefile b/cmd/litestream/testdata/generations/replica-url/Makefile index 3125ed28..01edcf5d 100644 --- a/cmd/litestream/testdata/generations/replica-url/Makefile +++ b/cmd/litestream/testdata/generations/replica-url/Makefile @@ -1,9 +1,9 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 
replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 
b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/latest-replica/Makefile b/cmd/litestream/testdata/restore/latest-replica/Makefile index a8a9885f..24334a30 100644 --- a/cmd/litestream/testdata/restore/latest-replica/Makefile +++ b/cmd/litestream/testdata/restore/latest-replica/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001030000 replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 
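[Editor's note] The renamed fixtures above illustrate the new replica layout: WAL segments are grouped into one directory per index, and both the index and offset are zero-padded 16-character lowercase hex. Below is a minimal, illustrative sketch (not code from this patch); the helper name walSegmentPath is hypothetical, and the %016x formatting mirrors the FormatIndex/FormatOffset helpers changed later in this patch.

// Illustrative only: composes a WAL segment path under the new
// generations/<generation>/wal/<index>/<offset>.wal.lz4 layout shown in the
// renamed testdata above. walSegmentPath is a hypothetical helper, not the
// library's API.
package main

import (
	"fmt"
	"path"
)

func walSegmentPath(root, generation string, index int, offset int64) string {
	return path.Join(root, "generations", generation, "wal",
		fmt.Sprintf("%016x", index), fmt.Sprintf("%016x.wal.lz4", offset))
}

func main() {
	// Prints: replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4
	fmt.Println(walSegmentPath("replica", "0000000000000000", 1, 0))
}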
diff --git a/cmd/litestream/testdata/restore/ok/00000002.db b/cmd/litestream/testdata/restore/ok/0000000000000002.db similarity index 100% rename from cmd/litestream/testdata/restore/ok/00000002.db rename to cmd/litestream/testdata/restore/ok/0000000000000002.db diff --git a/cmd/litestream/testdata/restore/ok/README b/cmd/litestream/testdata/restore/ok/README index 9450f45a..48c0fd4e 100644 --- a/cmd/litestream/testdata/restore/ok/README +++ b/cmd/litestream/testdata/restore/ok/README @@ -5,9 +5,9 @@ To reproduce this testdata, run sqlite3 and execute: INSERT INTO t (x) VALUES (1); INSERT INTO t (x) VALUES (2); - sl3 split -o generations/0000000000000000/wal/00000000 db-wal - cp db generations/0000000000000000/snapshots/00000000.snapshot - lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot Then execute: @@ -15,7 +15,7 @@ Then execute: PRAGMA wal_checkpoint(TRUNCATE); INSERT INTO t (x) VALUES (3); - sl3 split -o generations/0000000000000000/wal/00000001 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal Then execute: @@ -24,13 +24,13 @@ Then execute: INSERT INTO t (x) VALUES (4); INSERT INTO t (x) VALUES (5); - sl3 split -o generations/0000000000000000/wal/00000002 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal Finally, obtain the final snapshot: PRAGMA wal_checkpoint(TRUNCATE); - cp db 00000002.db + cp db 0000000000000002.db rm db* diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to 
cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/00000002/00001038.wal.lz4 rename to cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 diff --git a/cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/ok/Makefile b/cmd/litestream/testdata/snapshots/ok/Makefile index 866903e6..739022ae 100644 --- a/cmd/litestream/testdata/snapshots/ok/Makefile +++ b/cmd/litestream/testdata/snapshots/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 
replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/ok/stdout b/cmd/litestream/testdata/snapshots/ok/stdout index 270a2524..604aefcc 100644 --- a/cmd/litestream/testdata/snapshots/ok/stdout +++ b/cmd/litestream/testdata/snapshots/ok/stdout @@ -1,4 +1,4 @@ -replica generation index size created -file 0000000000000001 00000000 93 2000-01-03T00:00:00Z -file 0000000000000000 00000001 93 2000-01-02T00:00:00Z -file 0000000000000000 00000000 93 2000-01-01T00:00:00Z +replica generation index size created +file 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-name/Makefile b/cmd/litestream/testdata/snapshots/replica-name/Makefile index 050a241b..c7399029 100644 --- a/cmd/litestream/testdata/snapshots/replica-name/Makefile +++ b/cmd/litestream/testdata/snapshots/replica-name/Makefile @@ -1,4 +1,4 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git 
a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-name/stdout b/cmd/litestream/testdata/snapshots/replica-name/stdout index 42c074e0..276224a9 100644 --- a/cmd/litestream/testdata/snapshots/replica-name/stdout +++ b/cmd/litestream/testdata/snapshots/replica-name/stdout @@ -1,2 +1,2 @@ -replica generation index size created -replica1 0000000000000001 00000000 93 2000-01-02T00:00:00Z +replica generation index size created +replica1 0000000000000001 0000000000000000 93 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-url/Makefile b/cmd/litestream/testdata/snapshots/replica-url/Makefile index f300c83a..233e9223 100644 --- a/cmd/litestream/testdata/snapshots/replica-url/Makefile +++ b/cmd/litestream/testdata/snapshots/replica-url/Makefile @@ -1,5 +1,5 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to 
cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/stdout b/cmd/litestream/testdata/snapshots/replica-url/stdout index 270a2524..604aefcc 100644 --- a/cmd/litestream/testdata/snapshots/replica-url/stdout +++ b/cmd/litestream/testdata/snapshots/replica-url/stdout @@ -1,4 +1,4 @@ -replica generation index size created -file 0000000000000001 00000000 93 2000-01-03T00:00:00Z -file 0000000000000000 00000001 93 2000-01-02T00:00:00Z -file 0000000000000000 00000000 93 2000-01-01T00:00:00Z +replica generation index size created +file 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/ok/Makefile b/cmd/litestream/testdata/wal/ok/Makefile index 2bb5a8e1..6522d941 100644 --- a/cmd/litestream/testdata/wal/ok/Makefile +++ b/cmd/litestream/testdata/wal/ok/Makefile @@ -1,7 +1,7 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 - TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git 
a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/ok/stdout b/cmd/litestream/testdata/wal/ok/stdout index 90a58f59..f420a7b8 100644 --- a/cmd/litestream/testdata/wal/ok/stdout +++ b/cmd/litestream/testdata/wal/ok/stdout @@ -1,5 +1,5 @@ -replica generation index offset size created -file 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z -file 0000000000000000 00000001 00000000 93 2000-01-03T00:00:00Z -file 0000000000000000 00000000 00000001 93 2000-01-02T00:00:00Z -file 0000000000000000 00000000 00000000 93 2000-01-01T00:00:00Z +replica generation index offset size created +file 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z +file 0000000000000000 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-name/Makefile b/cmd/litestream/testdata/wal/replica-name/Makefile index 5556bc8f..2347a2f1 100644 --- a/cmd/litestream/testdata/wal/replica-name/Makefile +++ b/cmd/litestream/testdata/wal/replica-name/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 replica1/generations/0000000000000000/wal/00000001/00000000.wal.lz4 - TZ=UTC touch -ct 200001040000 replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica1/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git 
a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/stdout b/cmd/litestream/testdata/wal/replica-name/stdout index 2e9f9d94..80756dc8 100644 --- a/cmd/litestream/testdata/wal/replica-name/stdout +++ b/cmd/litestream/testdata/wal/replica-name/stdout @@ -1,2 +1,2 @@ -replica generation index offset size created -replica1 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z +replica generation index offset size created +replica1 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-url/Makefile b/cmd/litestream/testdata/wal/replica-url/Makefile index 2bb5a8e1..6522d941 100644 --- a/cmd/litestream/testdata/wal/replica-url/Makefile +++ b/cmd/litestream/testdata/wal/replica-url/Makefile @@ -1,7 +1,7 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 - TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 
200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/00000000/00000000.wal.lz4 rename to cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-url/stdout b/cmd/litestream/testdata/wal/replica-url/stdout index 90a58f59..f420a7b8 100644 --- a/cmd/litestream/testdata/wal/replica-url/stdout +++ b/cmd/litestream/testdata/wal/replica-url/stdout @@ -1,5 +1,5 @@ -replica generation index offset size created -file 0000000000000001 00000000 00000000 93 2000-01-04T00:00:00Z -file 0000000000000000 00000001 00000000 93 2000-01-03T00:00:00Z -file 0000000000000000 00000000 00000001 93 2000-01-02T00:00:00Z -file 0000000000000000 00000000 00000000 93 2000-01-01T00:00:00Z +replica generation index offset size created +file 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z +file 0000000000000000 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index 1124c030..fc54840c 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -108,11 +108,11 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (ret error) { fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated") for _, info := 
range infos {
-		fmt.Fprintf(w, "%s\t%s\t%08x\t%08x\t%d\t%s\n",
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\n",
 			info.replicaName,
 			info.Generation,
-			info.Index,
-			info.Offset,
+			litestream.FormatIndex(info.Index),
+			litestream.FormatOffset(info.Offset),
 			info.Size,
 			info.CreatedAt.Format(time.RFC3339),
 		)
diff --git a/db.go b/db.go
index 791d80d9..a8c59dec 100644
--- a/db.go
+++ b/db.go
@@ -703,7 +703,7 @@ func (db *DB) cleanWAL(ctx context.Context) error {
 			return err
 		}
 
-		db.Logger.Printf("remove shadow index: %s/%08x", generation, index)
+		db.Logger.Printf("remove shadow index: %s/%s", generation, FormatIndex(index))
 	}
 
 	return nil
diff --git a/file_replica_client.go b/file_replica_client.go
index a8873f0f..2eef73ad 100644
--- a/file_replica_client.go
+++ b/file_replica_client.go
@@ -100,7 +100,7 @@ func (c *FileReplicaClient) WALSegmentPath(generation string, index int, offset
 	if err != nil {
 		return "", err
 	}
-	return filepath.Join(dir, FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil
+	return filepath.Join(dir, FormatIndex(index), fmt.Sprintf("%s.wal.lz4", FormatOffset(offset))), nil
 }
 
 // Generations returns a list of available generation names.
diff --git a/file_replica_client_test.go b/file_replica_client_test.go
index 65dcb111..1a1405f6 100644
--- a/file_replica_client_test.go
+++ b/file_replica_client_test.go
@@ -78,7 +78,7 @@ func TestReplicaClient_SnapshotPath(t *testing.T) {
 	t.Run("OK", func(t *testing.T) {
 		if got, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil {
 			t.Fatal(err)
-		} else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
+		} else if want := "/foo/generations/0123456701234567/snapshots/00000000000003e8.snapshot.lz4"; got != want {
 			t.Fatalf("SnapshotPath()=%v, want %v", got, want)
 		}
 	})
@@ -118,7 +118,7 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) {
 	t.Run("OK", func(t *testing.T) {
 		if got, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil {
 			t.Fatal(err)
-		} else if want := "/foo/generations/0123456701234567/wal/000003e8/000003e9.wal.lz4"; got != want {
+		} else if want := "/foo/generations/0123456701234567/wal/00000000000003e8/00000000000003e9.wal.lz4"; got != want {
 			t.Fatalf("WALPath()=%v, want %v", got, want)
 		}
 	})
diff --git a/gcs/replica_client.go b/gcs/replica_client.go
index 0a45b2b0..7f5d91c3 100644
--- a/gcs/replica_client.go
+++ b/gcs/replica_client.go
@@ -160,8 +160,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
 	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
 	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))
 
-	// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
-
 	return litestream.SnapshotInfo{
 		Generation: generation,
 		Index:      index,
diff --git a/internal/internal.go b/internal/internal.go
index 681726a9..0c70d4d7 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -15,6 +15,12 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 )
 
+// Platform-independent maximum integer sizes.
+const (
+	MaxUint = ^uint(0)
+	MaxInt  = int(MaxUint >> 1)
+)
+
 // ReadCloser wraps a reader to also attach a separate closer.
 type ReadCloser struct {
 	r io.Reader
@@ -170,14 +176,14 @@ func ParseSnapshotPath(s string) (index int, err error) {
 		return 0, fmt.Errorf("invalid snapshot path")
 	}
 
-	i32, _ := strconv.ParseUint(a[1], 16, 32)
-	if i32 > math.MaxInt32 {
+	i64, _ := strconv.ParseUint(a[1], 16, 64)
+	if i64 > uint64(MaxInt) {
 		return 0, fmt.Errorf("index too large in snapshot path %q", s)
 	}
-	return int(i32), nil
+	return int(i64), nil
 }
 
-var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`)
+var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{16})\.snapshot\.lz4$`)
 
 // ParseWALSegmentPath parses the index/offset from a segment filename. Used by path-based replicas.
 func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
@@ -186,18 +192,18 @@ func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
 		return 0, 0, fmt.Errorf("invalid wal segment path")
 	}
 
-	i32, _ := strconv.ParseUint(a[1], 16, 32)
-	if i32 > math.MaxInt32 {
+	i64, _ := strconv.ParseUint(a[1], 16, 64)
+	if i64 > uint64(MaxInt) {
 		return 0, 0, fmt.Errorf("index too large in wal segment path %q", s)
 	}
 	off64, _ := strconv.ParseUint(a[2], 16, 64)
 	if off64 > math.MaxInt64 {
 		return 0, 0, fmt.Errorf("offset too large in wal segment path %q", s)
 	}
-	return int(i32), int64(off64), nil
+	return int(i64), int64(off64), nil
 }
 
-var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\/([0-9a-f]{8})\.wal\.lz4$`)
+var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{16})\/([0-9a-f]{16})\.wal\.lz4$`)
 
 // Shared replica metrics.
 var (
diff --git a/internal/internal_test.go b/internal/internal_test.go
index 5b661dea..9d2c49b2 100644
--- a/internal/internal_test.go
+++ b/internal/internal_test.go
@@ -15,12 +15,12 @@ func TestParseSnapshotPath(t *testing.T) {
 		index int
 		err   error
 	}{
-		{"00bc614e.snapshot.lz4", 12345678, nil},
-		{"xxxxxxxx.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
-		{"00bc614.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
-		{"00bc614e.snapshot.lz", 0, fmt.Errorf("invalid snapshot path")},
-		{"00bc614e.snapshot", 0, fmt.Errorf("invalid snapshot path")},
-		{"00bc614e", 0, fmt.Errorf("invalid snapshot path")},
+		{"0000000000bc614e.snapshot.lz4", 12345678, nil},
+		{"xxxxxxxxxxxxxxxx.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
+		{"0000000000bc614.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
+		{"0000000000bc614e.snapshot.lz", 0, fmt.Errorf("invalid snapshot path")},
+		{"0000000000bc614e.snapshot", 0, fmt.Errorf("invalid snapshot path")},
+		{"0000000000bc614e", 0, fmt.Errorf("invalid snapshot path")},
 		{"", 0, fmt.Errorf("invalid snapshot path")},
 	} {
 		t.Run("", func(t *testing.T) {
@@ -41,20 +41,22 @@ func TestParseWALSegmentPath(t *testing.T) {
 		offset int64
 		err    error
 	}{
-		{"00bc614e/000003e8.wal.lz4", 12345678, 1000, nil},
-		{"00000000/00000000.wal", 0, 0, fmt.Errorf("invalid wal segment path")},
-		{"00000000/00000000", 0, 0, fmt.Errorf("invalid wal segment path")},
-		{"00000000/", 0, 0, fmt.Errorf("invalid wal segment path")},
-		{"00000000", 0, 0, fmt.Errorf("invalid wal segment path")},
+		{"0000000000bc614e/00000000000003e8.wal.lz4", 12345678, 1000, nil},
+		{"0000000000000000/0000000000000000.wal", 0, 0, fmt.Errorf("invalid wal segment path")},
+		{"0000000000000000/0000000000000000", 0, 0, fmt.Errorf("invalid wal segment path")},
+		{"0000000000000000/", 0, 0, fmt.Errorf("invalid wal segment path")},
+		{"0000000000000000", 0, 0, fmt.Errorf("invalid wal segment path")},
 		{"", 0, 0, fmt.Errorf("invalid wal segment path")},
 	} {
 		t.Run("", func(t *testing.T) {
 			index, offset, err := internal.ParseWALSegmentPath(tt.s)
 			if got, want := index, tt.index; got != want {
 				t.Errorf("index=%#v, want %#v", got, want)
-			} else if got, want := offset, tt.offset; got != want {
+			}
+			if got, want := offset, tt.offset; got != want {
 				t.Errorf("offset=%#v, want %#v", got, want)
-			} else if got, want := err, tt.err; !reflect.DeepEqual(got, want) {
+			}
+			if got, want := err, tt.err; !reflect.DeepEqual(got, want) {
 				t.Errorf("err=%#v, want %#v", got, want)
 			}
 		})
diff --git a/litestream.go b/litestream.go
index 56144573..c91c6484 100644
--- a/litestream.go
+++ b/litestream.go
@@ -6,12 +6,14 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/benbjohnson/litestream/internal"
 	"github.com/mattn/go-sqlite3"
 )
@@ -302,7 +304,7 @@ func (p Pos) String() string {
 	if p.IsZero() {
 		return ""
 	}
-	return fmt.Sprintf("%s/%08x:%08x", p.Generation, p.Index, p.Offset)
+	return fmt.Sprintf("%s/%s:%s", p.Generation, FormatIndex(p.Index), FormatOffset(p.Offset))
 }
 
 // IsZero returns true if p is the zero value.
@@ -426,32 +428,36 @@ func IsGenerationName(s string) bool {
 	return true
 }
 
-// FormatIndex formats an index as an 8-character hex value.
+// FormatIndex formats an index as a hex value.
 func FormatIndex(index int) string {
-	return fmt.Sprintf("%08x", index)
+	return fmt.Sprintf("%016x", index)
 }
 
 // ParseIndex parses a hex-formatted index into an integer.
 func ParseIndex(s string) (int, error) {
-	v, err := strconv.ParseUint(s, 16, 32)
+	v, err := strconv.ParseUint(s, 16, 64)
 	if err != nil {
 		return -1, fmt.Errorf("cannot parse index: %q", s)
+	} else if v > uint64(internal.MaxInt) {
+		return -1, fmt.Errorf("index too large: %q", s)
 	}
 	return int(v), nil
 }
 
-// FormatOffset formats an offset as an 8-character hex value.
+// FormatOffset formats an offset as a hex value.
 func FormatOffset(offset int64) string {
-	return fmt.Sprintf("%08x", offset)
+	return fmt.Sprintf("%016x", offset)
 }
 
 // ParseOffset parses a hex-formatted offset into an integer.
 func ParseOffset(s string) (int64, error) {
-	v, err := strconv.ParseInt(s, 16, 32)
+	v, err := strconv.ParseUint(s, 16, 64)
 	if err != nil {
-		return -1, fmt.Errorf("cannot parse index: %q", s)
+		return -1, fmt.Errorf("cannot parse offset: %q", s)
+	} else if v > math.MaxInt64 {
+		return -1, fmt.Errorf("offset too large: %q", s)
 	}
-	return v, nil
+	return int64(v), nil
 }
 
 // removeDBFiles deletes the database and related files (journal, shm, wal).
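[Editor's note] A quick usage sketch of the widened helpers above, assuming this patch is applied; the printed values follow the new %016x format, and the error text matches the new ParseOffset bounds check.

// Illustrative usage of the 16-character index/offset format (assumes the
// patch is applied to github.com/benbjohnson/litestream).
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	s := litestream.FormatIndex(1000)
	fmt.Println(s) // 00000000000003e8 (16 hex chars instead of the old 8)

	index, err := litestream.ParseIndex(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(index) // 1000

	// Offsets are parsed as uint64 and rejected if they exceed MaxInt64.
	if _, err := litestream.ParseOffset("ffffffffffffffff"); err != nil {
		fmt.Println(err) // offset too large: "ffffffffffffffff"
	}
}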
diff --git a/replica.go b/replica.go
index cc5652d1..50e822e2 100644
--- a/replica.go
+++ b/replica.go
@@ -514,7 +514,7 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
 		return info, err
 	}
 
-	r.Logger.Printf("snapshot written %s/%08x", pos.Generation, pos.Index)
+	r.Logger.Printf("snapshot written %s/%s", pos.Generation, FormatIndex(pos.Index))
 
 	return info, nil
 }
@@ -580,9 +580,9 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str
 		}
 
 		if err := r.client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil {
-			return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err)
+			return fmt.Errorf("delete snapshot %s/%s: %w", info.Generation, FormatIndex(info.Index), err)
 		}
-		r.Logger.Printf("snapshot deleted %s/%08x", generation, index)
+		r.Logger.Printf("snapshot deleted %s/%s", generation, FormatIndex(index))
 	}
 
 	return itr.Close()
diff --git a/replica_client.go b/replica_client.go
index 3bf01b17..ba6d4481 100644
--- a/replica_client.go
+++ b/replica_client.go
@@ -88,7 +88,7 @@ func FindSnapshotForIndex(ctx context.Context, client ReplicaClient, generation
 	if n == 0 {
 		return 0, ErrNoSnapshots
 	} else if snapshotIndex == -1 {
-		return 0, fmt.Errorf("no snapshots available at or before index %08x", index)
+		return 0, fmt.Errorf("no snapshots available at or before index %s", FormatIndex(index))
 	}
 	return snapshotIndex, nil
 }
@@ -349,7 +349,7 @@ func Restore(ctx context.Context, client ReplicaClient, filename, generation str
 	// Copy snapshot to output path.
 	tmpPath := filename + ".tmp"
-	logger.Printf("%srestoring snapshot %s/%08x to %s", opt.LogPrefix, generation, snapshotIndex, tmpPath)
+	logger.Printf("%srestoring snapshot %s/%s to %s", opt.LogPrefix, generation, FormatIndex(snapshotIndex), tmpPath)
 	if err := RestoreSnapshot(ctx, client, tmpPath, generation, snapshotIndex, opt.Mode, opt.Uid, opt.Gid); err != nil {
 		return fmt.Errorf("cannot restore snapshot: %w", err)
 	}
@@ -380,7 +380,7 @@ func Restore(ctx context.Context, client ReplicaClient, filename, generation str
 	if err = ApplyWAL(ctx, tmpPath, walPath); err != nil {
 		return fmt.Errorf("cannot apply wal: %w", err)
 	}
-	logger.Printf("%sapplied wal %s/%08x elapsed=%s", opt.LogPrefix, generation, walIndex, time.Since(startTime).String())
+	logger.Printf("%sapplied wal %s/%s elapsed=%s", opt.LogPrefix, generation, FormatIndex(walIndex), time.Since(startTime).String())
 }
 
 // Copy file to final location.
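[Editor's note] The logging changes above all route through the new position formatting. A hypothetical sketch, assuming the patch is applied and that Pos is constructed with the exported Generation/Index/Offset fields used elsewhere in this diff:

// Illustrative only: shows the new "generation/index:offset" rendering with
// 16-character hex fields. The struct literal below is an assumption about
// how Pos is built; the fields and String() behavior come from this patch.
package main

import (
	"fmt"

	"github.com/benbjohnson/litestream"
)

func main() {
	pos := litestream.Pos{Generation: "0000000000000000", Index: 2, Offset: 0x1038}
	// Prints: 0000000000000000/0000000000000002:0000000000001038
	fmt.Println(pos.String())
}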
diff --git a/replica_client_test.go b/replica_client_test.go index 65d5d819..fedf9f27 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -19,7 +19,7 @@ func TestFindSnapshotForIndex(t *testing.T) { if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000007d0); err != nil { t.Fatal(err) } else if got, want := snapshotIndex, 0x000003e8; got != want { - t.Fatalf("index=%08x, want %08x", got, want) + t.Fatalf("index=%s, want %s", litestream.FormatIndex(got), litestream.FormatIndex(want)) } }) @@ -28,14 +28,14 @@ func TestFindSnapshotForIndex(t *testing.T) { if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8); err != nil { t.Fatal(err) } else if got, want := snapshotIndex, 0x000003e8; got != want { - t.Fatalf("index=%08x, want %08x", got, want) + t.Fatalf("index=%s, want %s", litestream.FormatIndex(got), litestream.FormatIndex(want)) } }) t.Run("ErrNoSnapshotsBeforeIndex", func(t *testing.T) { client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots-before-index")) _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) - if err == nil || err.Error() != `no snapshots available at or before index 000003e8` { + if err == nil || err.Error() != `no snapshots available at or before index 00000000000003e8` { t.Fatalf("unexpected error: %#v", err) } }) @@ -499,7 +499,7 @@ func TestRestore(t *testing.T) { client := litestream.NewFileReplicaClient(testDir) if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, litestream.NewRestoreOptions()); err != nil { t.Fatal(err) - } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.db"), filepath.Join(tempDir, "db")) { t.Fatalf("file mismatch") } }) @@ -511,7 +511,7 @@ func TestRestore(t *testing.T) { client := litestream.NewFileReplicaClient(testDir) if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err != nil { t.Fatal(err) - } else if !fileEqual(t, filepath.Join(testDir, "00000000.db"), filepath.Join(tempDir, "db")) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.db"), filepath.Join(tempDir, "db")) { t.Fatalf("file mismatch") } }) @@ -525,7 +525,7 @@ func TestRestore(t *testing.T) { opt.Parallelism = 0 if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, opt); err != nil { t.Fatal(err) - } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.db"), filepath.Join(tempDir, "db")) { t.Fatalf("file mismatch") } }) diff --git a/s3/replica_client.go b/s3/replica_client.go index a9e3e638..182d713f 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -253,8 +253,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N())) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - 
return litestream.SnapshotInfo{ Generation: generation, Index: index, diff --git a/sftp/replica_client.go b/sftp/replica_client.go index 8b651e97..fd0912cd 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -282,8 +282,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n)) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, diff --git a/testdata/find-latest-generation/ok/Makefile b/testdata/find-latest-generation/ok/Makefile index c71ce141..45a7e010 100644 --- a/testdata/find-latest-generation/ok/Makefile +++ b/testdata/find-latest-generation/ok/Makefile @@ -1,7 +1,7 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 
b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 rename to testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 rename to testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 similarity index 100% rename from testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 rename to testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 similarity index 100% rename from testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 rename to testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 similarity index 100% rename from testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 rename to testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 diff --git a/testdata/generation-time-bounds/ok/Makefile b/testdata/generation-time-bounds/ok/Makefile index e29f9e4e..7f2ad771 100644 --- a/testdata/generation-time-bounds/ok/Makefile +++ b/testdata/generation-time-bounds/ok/Makefile @@ -1,8 +1,8 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 
generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/generation-time-bounds/snapshots-only/Makefile b/testdata/generation-time-bounds/snapshots-only/Makefile index 6405068a..aa5978bd 100644 --- a/testdata/generation-time-bounds/snapshots-only/Makefile +++ b/testdata/generation-time-bounds/snapshots-only/Makefile @@ -1,5 +1,5 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git 
a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 rename to testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 diff --git 
a/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/max-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to testdata/max-index/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 similarity index 100% rename from testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 rename to testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 diff --git a/testdata/max-snapshot-index/ok/Makefile b/testdata/max-snapshot-index/ok/Makefile index d7b4d6c7..47e186bd 100644 --- a/testdata/max-snapshot-index/ok/Makefile +++ b/testdata/max-snapshot-index/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 
200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 similarity index 100% rename from testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 rename to testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 similarity index 100% rename from testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 rename to testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 similarity index 100% rename from testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 rename to testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/replica-client-time-bounds/ok/Makefile b/testdata/replica-client-time-bounds/ok/Makefile index d7b4d6c7..47e186bd 100644 --- a/testdata/replica-client-time-bounds/ok/Makefile +++ b/testdata/replica-client-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001010000 
generations/0000000000000002/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 rename to testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 rename to testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 rename to testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/restore/bad-permissions/00000000.db b/testdata/restore/bad-permissions/0000000000000000.db similarity index 100% rename from testdata/restore/bad-permissions/00000000.db rename to testdata/restore/bad-permissions/0000000000000000.db diff --git a/testdata/restore/bad-permissions/README b/testdata/restore/bad-permissions/README index 9450f45a..48c0fd4e 100644 --- a/testdata/restore/bad-permissions/README +++ b/testdata/restore/bad-permissions/README @@ -5,9 +5,9 @@ To reproduce this testdata, run sqlite3 and execute: INSERT INTO t (x) VALUES (1); INSERT INTO t (x) VALUES (2); - sl3 split -o generations/0000000000000000/wal/00000000 db-wal - cp db generations/0000000000000000/snapshots/00000000.snapshot - lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot Then execute: @@ -15,7 +15,7 @@ Then execute: PRAGMA wal_checkpoint(TRUNCATE); INSERT INTO t (x) VALUES (3); - sl3 split -o 
generations/0000000000000000/wal/00000001 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal Then execute: @@ -24,13 +24,13 @@ Then execute: INSERT INTO t (x) VALUES (4); INSERT INTO t (x) VALUES (5); - sl3 split -o generations/0000000000000000/wal/00000002 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal Finally, obtain the final snapshot: PRAGMA wal_checkpoint(TRUNCATE); - cp db 00000002.db + cp db 0000000000000002.db rm db* diff --git a/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/restore/bad-permissions/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/restore/ok/00000002.db b/testdata/restore/ok/0000000000000002.db similarity index 100% rename from testdata/restore/ok/00000002.db rename to testdata/restore/ok/0000000000000002.db diff --git a/testdata/restore/ok/README b/testdata/restore/ok/README index 9450f45a..48c0fd4e 100644 --- a/testdata/restore/ok/README +++ b/testdata/restore/ok/README @@ -5,9 +5,9 @@ To reproduce this testdata, run sqlite3 and execute: INSERT INTO t (x) VALUES (1); INSERT INTO t (x) VALUES (2); - sl3 split -o generations/0000000000000000/wal/00000000 db-wal - cp db generations/0000000000000000/snapshots/00000000.snapshot - lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot Then execute: @@ -15,7 +15,7 @@ Then execute: PRAGMA wal_checkpoint(TRUNCATE); INSERT INTO t (x) VALUES (3); - sl3 split -o generations/0000000000000000/wal/00000001 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal Then execute: @@ -24,13 +24,13 @@ Then execute: INSERT INTO t (x) VALUES (4); INSERT INTO t (x) VALUES (5); - sl3 split -o generations/0000000000000000/wal/00000002 db-wal + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal Finally, obtain the final snapshot: PRAGMA wal_checkpoint(TRUNCATE); - cp db 00000002.db + cp db 0000000000000002.db rm db* diff --git a/testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/restore/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% rename from 
testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 similarity index 100% rename from testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 rename to testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 diff --git a/testdata/restore/snapshot-only/00000000.db b/testdata/restore/snapshot-only/0000000000000000.db similarity index 100% rename from testdata/restore/snapshot-only/00000000.db rename to testdata/restore/snapshot-only/0000000000000000.db diff --git a/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/restore/snapshot-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/snapshot-time-bounds/ok/Makefile b/testdata/snapshot-time-bounds/ok/Makefile index 6c7e69a0..87751339 100644 --- a/testdata/snapshot-time-bounds/ok/Makefile +++ b/testdata/snapshot-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/00000002.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 
b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 similarity index 100% rename from testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 rename to testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 similarity index 100% rename from testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 rename to testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 similarity index 100% rename from testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 rename to testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 diff --git a/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% rename from 
testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 rename to testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% 
rename from testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 rename to testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/wal-downloader/ok/00000000.wal b/testdata/wal-downloader/ok/0000000000000000.wal similarity index 100% rename from testdata/wal-downloader/ok/00000000.wal rename to testdata/wal-downloader/ok/0000000000000000.wal diff --git a/testdata/wal-downloader/ok/00000001.wal b/testdata/wal-downloader/ok/0000000000000001.wal similarity index 100% rename from testdata/wal-downloader/ok/00000001.wal rename to testdata/wal-downloader/ok/0000000000000001.wal diff --git a/testdata/wal-downloader/ok/00000002.wal b/testdata/wal-downloader/ok/0000000000000002.wal similarity index 100% rename from testdata/wal-downloader/ok/00000002.wal rename to testdata/wal-downloader/ok/0000000000000002.wal diff --git a/testdata/wal-downloader/ok/README b/testdata/wal-downloader/ok/README index 63eda968..c4e68f69 100644 
--- a/testdata/wal-downloader/ok/README +++ b/testdata/wal-downloader/ok/README @@ -7,8 +7,8 @@ To reproduce this testdata, run sqlite3 and execute: And copy & split the WAL into segments: - sl3 split -o generations/0000000000000000/wal/00000000 db-wal - cp db-wal 00000000.wal + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db-wal 0000000000000000.wal Then execute: @@ -18,8 +18,8 @@ Then execute: And split again: - sl3 split -o generations/0000000000000000/wal/00000001 db-wal - cp db-wal 00000001.wal + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal + cp db-wal 0000000000000001.wal Then execute: @@ -30,8 +30,8 @@ Then execute: And split again: - sl3 split -o generations/0000000000000000/wal/00000002 db-wal - cp db-wal 00000002.wal + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal + cp db-wal 0000000000000002.wal Finally, remove the original database files: diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 similarity index 100% rename from testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 rename to testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 
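
The renames above and below reflect the new layout in which WAL segments are grouped into one directory per index (generations/<generation>/wal/<index>/<offset>.wal.lz4). For illustration only (not part of the patch), a sketch of how such a segment key is composed from a position; the replica root is a hypothetical example.

    package main

    import (
        "fmt"
        "path"

        "github.com/benbjohnson/litestream"
    )

    func main() {
        // Compose a segment key under the grouped-by-index layout used by the
        // renamed testdata files: one directory per WAL index, one file per offset.
        pos := litestream.Pos{Generation: "0000000000000000", Index: 2, Offset: 0x1038}
        key := path.Join("/var/lib/litestream/db", "generations", pos.Generation,
            "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
        fmt.Println(key)
        // /var/lib/litestream/db/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4
    }
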
diff --git a/testdata/wal-downloader/one/00000000.wal b/testdata/wal-downloader/one/0000000000000000.wal similarity index 100% rename from testdata/wal-downloader/one/00000000.wal rename to testdata/wal-downloader/one/0000000000000000.wal diff --git a/testdata/wal-downloader/one/README b/testdata/wal-downloader/one/README index afe550f2..bcad8cf8 100644 --- a/testdata/wal-downloader/one/README +++ b/testdata/wal-downloader/one/README @@ -7,8 +7,8 @@ To reproduce this testdata, run sqlite3 and execute: And copy & split the WAL into segments: - sl3 split -o generations/0000000000000000/wal/00000000 db-wal - cp db-wal 00000000.wal + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db-wal 0000000000000000.wal Finally, remove the original database files: diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 similarity index 100% rename from testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 rename to testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 similarity index 100% rename from testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 rename to testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 diff --git a/testdata/wal-time-bounds/ok/Makefile b/testdata/wal-time-bounds/ok/Makefile index fa7ab332..155d281d 100644 --- a/testdata/wal-time-bounds/ok/Makefile +++ b/testdata/wal-time-bounds/ok/Makefile @@ -1,6 +1,6 @@ .PHONY: default default: - TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 - TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 - TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 rename to testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 
b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 similarity index 100% rename from testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 rename to testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 similarity index 100% rename from testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 rename to testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/wal_downloader.go b/wal_downloader.go index b87c2454..c4903844 100644 --- a/wal_downloader.go +++ b/wal_downloader.go @@ -272,7 +272,7 @@ func (d *WALDownloader) downloader(ctx context.Context) error { // of the on-disk file on success. func (d *WALDownloader) downloadWAL(ctx context.Context, index int, offsets []int64) (string, error) { // Open handle to destination WAL path. - walPath := fmt.Sprintf("%s-%08x-wal", d.prefix, index) + walPath := fmt.Sprintf("%s-%s-wal", d.prefix, FormatIndex(index)) f, err := internal.CreateFile(walPath, d.Mode, d.Uid, d.Gid) if err != nil { return "", err @@ -285,7 +285,7 @@ func (d *WALDownloader) downloadWAL(ctx context.Context, index int, offsets []in if err := func() error { // Ensure next offset is our current position in the file. if written != offset { - return fmt.Errorf("missing WAL offset: generation=%s index=%08x offset=%08x", d.generation, index, written) + return fmt.Errorf("missing WAL offset: generation=%s index=%s offset=%s", d.generation, FormatIndex(index), FormatOffset(written)) } rd, err := d.client.WALSegmentReader(ctx, Pos{Generation: d.generation, Index: index, Offset: offset}) @@ -331,5 +331,5 @@ type WALNotFoundError struct { // Error returns the error string. 
func (e *WALNotFoundError) Error() string { - return fmt.Sprintf("wal not found: generation=%s index=%08x", e.Generation, e.Index) + return fmt.Sprintf("wal not found: generation=%s index=%s", e.Generation, FormatIndex(e.Index)) } diff --git a/wal_downloader_test.go b/wal_downloader_test.go index fd1817f9..f43ff177 100644 --- a/wal_downloader_test.go +++ b/wal_downloader_test.go @@ -38,7 +38,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 0; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -46,7 +46,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 1; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -54,7 +54,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 2; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -82,7 +82,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 0; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -106,7 +106,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 1; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -130,7 +130,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 0; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -138,7 +138,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 1; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -162,7 +162,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 1; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -170,7 +170,7 @@ func testWALDownloader(t *testing.T, 
parallelism int) { t.Fatal(err) } else if got, want := index, 2; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -191,7 +191,7 @@ func testWALDownloader(t *testing.T, parallelism int) { const n = 1000 tempDir := t.TempDir() for i := 0; i < n; i++ { - filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", fmt.Sprintf("%08x", i), "00000000.wal.lz4") + filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", litestream.FormatIndex(i), "0000000000000000.wal.lz4") if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil { t.Fatal(err) } else if err := os.WriteFile(filename, testingutil.CompressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil { @@ -316,7 +316,7 @@ func testWALDownloader(t *testing.T, parallelism int) { t.Fatal(err) } else if got, want := index, 0; got != want { t.Fatalf("index=%d, want %d", got, want) - } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { t.Fatalf("output file mismatch: %s", filename) } @@ -441,7 +441,7 @@ func testWALDownloader(t *testing.T, parallelism int) { d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) defer d.Close() - if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `missing WAL offset: generation=0000000000000000 index=00000000 offset=00002050` { + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `missing WAL offset: generation=0000000000000000 index=0000000000000000 offset=0000000000002050` { t.Fatal(err) } else if err := d.Close(); err != nil { t.Fatal(err) @@ -529,7 +529,7 @@ func testWALDownloader(t *testing.T, parallelism int) { func TestWALNotFoundError(t *testing.T) { err := &litestream.WALNotFoundError{Generation: "0123456789abcdef", Index: 1000} - if got, want := err.Error(), `wal not found: generation=0123456789abcdef index=000003e8`; got != want { + if got, want := err.Error(), `wal not found: generation=0123456789abcdef index=00000000000003e8`; got != want { t.Fatalf("Error()=%q, want %q", got, want) } } From 85891117177d8ab0793a2e45fc03661bdc10f0dc Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 11 Feb 2022 13:43:50 -0700 Subject: [PATCH 53/95] Implement streaming WAL segment iterator Currently, WALSegmentIterator implementations read to the end of their list of segments and return EOF. This commit adds the ability to push additional segments to in-process iterators and notify their callers that new segments are available. This is only implemented for the file-based iterator but other segment iterators may get this implementation in the future or have a wrapping iterator provide a polling-based implementation. --- db.go | 113 +++++++++++++++++++-------- file_replica_client.go | 147 +++++++++++++++++++++++++++++++++--- file_replica_client_test.go | 131 ++++++++++++++++++++++++++++++++ litestream.go | 10 ++- replica.go | 74 +++++++++++------- replica_client_test.go | 2 - 6 files changed, 400 insertions(+), 77 deletions(-) diff --git a/db.go b/db.go index a8c59dec..7821f62c 100644 --- a/db.go +++ b/db.go @@ -45,15 +45,17 @@ const BusyTimeout = 1 * time.Second // DB represents a managed instance of a SQLite database in the file system.
type DB struct { - mu sync.RWMutex - path string // part to database - db *sql.DB // target database - f *os.File // long-running db file descriptor - rtx *sql.Tx // long running read transaction - pos Pos // cached position - pageSize int // page size, in bytes - notifyCh chan struct{} // notifies DB of changes - walNotify chan struct{} // closes on WAL change + mu sync.RWMutex + path string // part to database + db *sql.DB // target database + f *os.File // long-running db file descriptor + rtx *sql.Tx // long running read transaction + pos Pos // cached position + pageSize int // page size, in bytes + notifyCh chan struct{} // notifies DB of changes + + // Iterators used to stream new WAL changes to replicas + itrs map[*FileWALSegmentIterator]struct{} // Cached salt & checksum from current shadow header. hdr []byte @@ -111,9 +113,10 @@ type DB struct { // NewDB returns a new instance of DB for a given path. func NewDB(path string) *DB { db := &DB{ - path: path, - notifyCh: make(chan struct{}, 1), - walNotify: make(chan struct{}), + path: path, + notifyCh: make(chan struct{}, 1), + + itrs: make(map[*FileWALSegmentIterator]struct{}), MinCheckpointPageN: DefaultMinCheckpointPageN, MaxCheckpointPageN: DefaultMaxCheckpointPageN, @@ -245,7 +248,7 @@ func (db *DB) invalidatePos(ctx context.Context) error { } // Iterate over all segments to find the last one. - itr, err := db.WALSegments(context.Background(), generation) + itr, err := db.walSegments(context.Background(), generation, false) if err != nil { return err } @@ -363,13 +366,6 @@ func (db *DB) NotifyCh() chan<- struct{} { return db.notifyCh } -// WALNotify returns a channel that closes when the shadow WAL changes. -func (db *DB) WALNotify() <-chan struct{} { - db.mu.RLock() - defer db.mu.RUnlock() - return db.walNotify -} - // PageSize returns the page size of the underlying database. // Only valid after database exists & Init() has successfully run. func (db *DB) PageSize() int { @@ -440,6 +436,14 @@ func (db *DB) Close() (err error) { } } + // Remove all iterators. + db.mu.Lock() + for itr := range db.itrs { + itr.SetErr(ErrDBClosed) + delete(db.itrs, itr) + } + db.mu.Unlock() + // Release the read lock to allow other applications to handle checkpointing. if db.rtx != nil { if e := db.releaseReadLock(); e != nil && err == nil { @@ -833,7 +837,6 @@ func (db *DB) sync(ctx context.Context) (err error) { return fmt.Errorf("invalidate: %w", err) } } - origPos := db.pos // If sync fails, reset position & cache. defer func() { @@ -934,12 +937,6 @@ func (db *DB) sync(ctx context.Context) (err error) { db.shadowWALIndexGauge.Set(float64(db.pos.Index)) db.shadowWALSizeGauge.Set(float64(db.pos.Offset)) - // Notify replicas of WAL changes. - if db.pos != origPos { - close(db.walNotify) - db.walNotify = make(chan struct{}) - } - return nil } @@ -1263,7 +1260,8 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error } defer f.Close() - if _, err := io.Copy(f, rd); err != nil { + n, err := io.Copy(f, rd) + if err != nil { return err } else if err := f.Sync(); err != nil { return err @@ -1276,14 +1274,47 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error return err } + // Generate + info := WALSegmentInfo{ + Generation: pos.Generation, + Index: pos.Index, + Offset: pos.Offset, + Size: n, + CreatedAt: time.Now(), + } + + // Notify all managed segment iterators. + for itr := range db.itrs { + // Notify iterators of generation change. 
+ if itr.Generation() != pos.Generation { + itr.SetErr(ErrGenerationChanged) + delete(db.itrs, itr) + continue + } + + // Attempt to append segment to end of iterator. + // On error, mark it on the iterator and remove from future notifications. + if err := itr.Append(info); err != nil { + itr.SetErr(fmt.Errorf("cannot append wal segment: %w", err)) + delete(db.itrs, itr) + continue + } + } + return nil } // WALSegments returns an iterator over all available WAL files for a generation. -func (db *DB) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) { +func (db *DB) WALSegments(ctx context.Context, generation string) (*FileWALSegmentIterator, error) { + db.mu.Lock() + defer db.mu.Unlock() + return db.walSegments(ctx, generation, true) +} + +func (db *DB) walSegments(ctx context.Context, generation string, managed bool) (*FileWALSegmentIterator, error) { ents, err := os.ReadDir(db.ShadowWALDir(generation)) if os.IsNotExist(err) { - return NewWALSegmentInfoSliceIterator(nil), nil + return NewFileWALSegmentIterator(db.ShadowWALDir(generation), generation, nil), nil } else if err != nil { return nil, err } @@ -1300,7 +1331,27 @@ func (db *DB) WALSegments(ctx context.Context, generation string) (WALSegmentIte sort.Ints(indexes) - return newFileWALSegmentIterator(db.ShadowWALDir(generation), generation, indexes), nil + itr := NewFileWALSegmentIterator(db.ShadowWALDir(generation), generation, indexes) + + // Managed iterators will have new segments pushed to them. + if managed { + itr.closeFunc = func() error { + return db.CloseWALSegmentIterator(itr) + } + + db.itrs[itr] = struct{}{} + } + + return itr, nil +} + +// CloseWALSegmentIterator removes itr from the list of managed iterators. +func (db *DB) CloseWALSegmentIterator(itr *FileWALSegmentIterator) error { + db.mu.Lock() + defer db.mu.Unlock() + + delete(db.itrs, itr) + return nil } // SQLite WAL constants diff --git a/file_replica_client.go b/file_replica_client.go index 2eef73ad..dc323a3a 100644 --- a/file_replica_client.go +++ b/file_replica_client.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sort" "strings" + "sync" "github.com/benbjohnson/litestream/internal" ) @@ -285,7 +286,7 @@ func (c *FileReplicaClient) WALSegments(ctx context.Context, generation string) sort.Ints(indexes) - return newFileWALSegmentIterator(dir, generation, indexes), nil + return NewFileWALSegmentIterator(dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. 
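For orientation, a minimal sketch of how a caller might drive the push-based iterator this patch introduces: drain Next() until it returns false, then block on NotifyCh() until the DB appends more segments. The package name, log output, and error policy below are illustrative only; the iterator methods used (WALSegments, Next, WALSegment, Err, NotifyCh, Close) are the ones added by this commit.

package example

import (
	"context"
	"fmt"

	"github.com/benbjohnson/litestream"
)

// consumeWALSegments prints WAL segments for a generation as they arrive.
func consumeWALSegments(ctx context.Context, db *litestream.DB, generation string) error {
	itr, err := db.WALSegments(ctx, generation) // managed iterator; the DB pushes new segments into it
	if err != nil {
		return err
	}
	defer itr.Close()

	for {
		// Read everything that is currently buffered.
		for itr.Next() {
			info := itr.WALSegment()
			fmt.Printf("wal segment %s/%s (%d bytes)\n",
				litestream.FormatIndex(info.Index), litestream.FormatOffset(info.Offset), info.Size)
		}
		if err := itr.Err(); err != nil {
			return err // e.g. ErrDBClosed or ErrGenerationChanged
		}

		// Wait for the DB to append another segment or for cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-itr.NotifyCh():
		}
	}
}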
@@ -360,33 +361,74 @@ func (c *FileReplicaClient) DeleteWALSegments(ctx context.Context, a []Pos) erro return nil } -type fileWalSegmentIterator struct { +type FileWALSegmentIterator struct { + mu sync.Mutex + notifyCh chan struct{} + closeFunc func() error + dir string generation string indexes []int - infos []WALSegmentInfo - err error + buffered bool + infos []WALSegmentInfo + err error } -func newFileWALSegmentIterator(dir, generation string, indexes []int) *fileWalSegmentIterator { - return &fileWalSegmentIterator{ +func NewFileWALSegmentIterator(dir, generation string, indexes []int) *FileWALSegmentIterator { + return &FileWALSegmentIterator{ dir: dir, generation: generation, indexes: indexes, + + notifyCh: make(chan struct{}, 1), } } -func (itr *fileWalSegmentIterator) Close() (err error) { - return itr.err +func (itr *FileWALSegmentIterator) Close() (err error) { + if itr.closeFunc != nil { + if e := itr.closeFunc(); e != nil && err == nil { + err = e + } + } + + if e := itr.Err(); e != nil && err == nil { + err = e + } + return err +} + +func (itr *FileWALSegmentIterator) NotifyCh() <-chan struct{} { + return itr.notifyCh } -func (itr *fileWalSegmentIterator) Next() bool { +// Generation returns the generation this iterator was initialized with. +func (itr *FileWALSegmentIterator) Generation() string { + return itr.generation +} + +// Indexes returns the pending indexes. Only used for testing. +func (itr *FileWALSegmentIterator) Indexes() []int { + itr.mu.Lock() + defer itr.mu.Unlock() + return itr.indexes +} + +func (itr *FileWALSegmentIterator) Next() bool { + itr.mu.Lock() + defer itr.mu.Unlock() + // Exit if an error has already occurred. if itr.err != nil { return false } + // Read first info, if buffered. + if itr.buffered { + itr.buffered = false + return true + } + for { // Move to the next segment in cache, if available. if len(itr.infos) > 1 { @@ -448,11 +490,94 @@ func (itr *fileWalSegmentIterator) Next() bool { } } -func (itr *fileWalSegmentIterator) Err() error { return itr.err } +// SetErr sets the error on the iterator and notifies it of the change. +func (itr *FileWALSegmentIterator) SetErr(err error) { + itr.mu.Lock() + defer itr.mu.Unlock() + if itr.err == nil { + itr.err = err + } + + select { + case itr.notifyCh <- struct{}{}: + default: + } +} + +// Err returns the first error that occurs on the iterator. +func (itr *FileWALSegmentIterator) Err() error { + itr.mu.Lock() + defer itr.mu.Unlock() + return itr.err +} + +func (itr *FileWALSegmentIterator) WALSegment() WALSegmentInfo { + itr.mu.Lock() + defer itr.mu.Unlock() -func (itr *fileWalSegmentIterator) WALSegment() WALSegmentInfo { if len(itr.infos) == 0 { return WALSegmentInfo{} } return itr.infos[0] } + +// Append add an additional WAL segment to the end of the iterator. This +// function expects that info will always be later than all previous infos +// that the iterator has or has seen. +func (itr *FileWALSegmentIterator) Append(info WALSegmentInfo) error { + itr.mu.Lock() + defer itr.mu.Unlock() + + if itr.err != nil { + return itr.err + } else if itr.generation != info.Generation { + return fmt.Errorf("generation mismatch") + } + + // If the info has an index that is still waiting to be read from disk into + // the cache then simply append it to the end of the indices. + // + // If we have no pending indices, then append to the end of the infos. If + // we don't have either then just append to the infos and avoid validation. 
+ if len(itr.indexes) > 0 { + maxIndex := itr.indexes[len(itr.indexes)-1] + + if info.Index < maxIndex { + return fmt.Errorf("appended index %q below max index %q", FormatIndex(info.Index), FormatIndex(maxIndex)) + } else if info.Index > maxIndex+1 { + return fmt.Errorf("appended index %q skips index %q", FormatIndex(info.Index), FormatIndex(maxIndex+1)) + } else if info.Index == maxIndex+1 { + itr.indexes = append(itr.indexes, info.Index) + } + // NOTE: no-op if segment index matches the current last index + + } else if len(itr.infos) > 0 { + lastInfo := itr.infos[len(itr.infos)-1] + if info.Index < lastInfo.Index { + return fmt.Errorf("appended index %q below current index %q", FormatIndex(info.Index), FormatIndex(lastInfo.Index)) + } else if info.Index > lastInfo.Index+1 { + return fmt.Errorf("appended index %q skips next index %q", FormatIndex(info.Index), FormatIndex(lastInfo.Index+1)) + } else if info.Index == lastInfo.Index+1 { + itr.indexes = append(itr.indexes, info.Index) + } else { + // If the index matches the current infos, verify its offset and append. + if info.Offset < lastInfo.Offset { + return fmt.Errorf("appended offset %s/%s before last offset %s/%s", FormatIndex(info.Index), FormatOffset(info.Offset), FormatIndex(lastInfo.Index), FormatOffset(lastInfo.Offset)) + } else if info.Offset == lastInfo.Offset { + return fmt.Errorf("duplicate offset %s/%s appended", FormatIndex(info.Index), FormatOffset(info.Offset)) + } + itr.infos = append(itr.infos, info) + } + } else { + itr.buffered = true + itr.infos = append(itr.infos, info) + } + + // Signal that a new segment is available. + select { + case itr.notifyCh <- struct{}{}: + default: + } + + return nil +} diff --git a/file_replica_client_test.go b/file_replica_client_test.go index 1a1405f6..a821ff24 100644 --- a/file_replica_client_test.go +++ b/file_replica_client_test.go @@ -1,6 +1,7 @@ package litestream_test import ( + "reflect" "testing" "github.com/benbjohnson/litestream" @@ -133,3 +134,133 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) { } }) } + +func TestFileWALSegmentIterator_Append(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } + + select { + case <-itr.NotifyCh(): + default: + t.Fatal("expected notification") + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), (litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + }) + + t.Run("MultiOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 1}); err != nil { + t.Fatal(err) + } + + select { + case <-itr.NotifyCh(): + default: + t.Fatal("expected notification") + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), (litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), 
(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 1}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + }) + + t.Run("MultiIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 1}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 2, Offset: 0}); err != nil { + t.Fatal(err) + } + + if got, want := itr.Indexes(), []int{1, 2}; !reflect.DeepEqual(got, want) { + t.Fatalf("indexes=%v, want %v", got, want) + } + }) + + t.Run("ErrGenerationMismatch", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0000000000000000", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `generation mismatch` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowMaxIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000000" below max index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrAboveMaxIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 3, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000003" skips index "0000000000000002"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowCurrentIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000000" below current index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSkipsNextIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 2, Offset: 
0}); err == nil || err.Error() != `appended index "0000000000000002" skips next index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 4}); err == nil || err.Error() != `appended offset 0000000000000000/0000000000000004 before last offset 0000000000000000/0000000000000005` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDuplicateOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err == nil || err.Error() != `duplicate offset 0000000000000000/0000000000000005 appended` { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/litestream.go b/litestream.go index c91c6484..d8cb9e84 100644 --- a/litestream.go +++ b/litestream.go @@ -39,10 +39,12 @@ const ( // Litestream errors. var ( - ErrNoGeneration = errors.New("no generation available") - ErrNoSnapshots = errors.New("no snapshots available") - ErrNoWALSegments = errors.New("no wal segments available") - ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") + ErrDBClosed = errors.New("database closed") + ErrNoGeneration = errors.New("no generation available") + ErrGenerationChanged = errors.New("generation changed") + ErrNoSnapshots = errors.New("no snapshots available") + ErrNoWALSegments = errors.New("no wal segments available") + ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") ) var ( diff --git a/replica.go b/replica.go index 50e822e2..6541fea2 100644 --- a/replica.go +++ b/replica.go @@ -33,6 +33,7 @@ type Replica struct { mu sync.RWMutex pos Pos // current replicated position + itr *FileWALSegmentIterator muf sync.Mutex f *os.File // long-running file descriptor to avoid non-OFD lock issues @@ -126,6 +127,11 @@ func (r *Replica) Start(ctx context.Context) { func (r *Replica) Stop() { r.cancel() r.wg.Wait() + + if r.itr != nil { + r.itr.Close() + r.itr = nil + } } // Close will close the DB file descriptor which could release locks on @@ -155,10 +161,24 @@ func (r *Replica) Sync(ctx context.Context) (err error) { // Find current position of database. dpos := r.db.Pos() if dpos.IsZero() { - return fmt.Errorf("no generation, waiting for data") + return ErrNoGeneration } generation := dpos.Generation + // Close out iterator if the generation has changed. + if r.itr != nil && r.itr.Generation() != generation { + _ = r.itr.Close() + r.itr = nil + } + + // Ensure we obtain a WAL iterator before we snapshot so we don't miss any segments. + resetItr := r.itr == nil + if resetItr { + if r.itr, err = r.db.WALSegments(ctx, generation); err != nil { + return fmt.Errorf("wal segments: %w", err) + } + } + // Create snapshot if no snapshots exist for generation. 
snapshotN, err := r.snapshotN(generation) if err != nil { @@ -174,7 +194,7 @@ func (r *Replica) Sync(ctx context.Context) (err error) { replicaSnapshotTotalGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(snapshotN)) // Determine position, if necessary. - if r.Pos().Generation != generation { + if resetItr { pos, err := r.calcPos(ctx, generation) if err != nil { return fmt.Errorf("cannot determine replica position: %s", err) @@ -196,16 +216,11 @@ func (r *Replica) Sync(ctx context.Context) (err error) { func (r *Replica) syncWAL(ctx context.Context) (err error) { pos := r.Pos() - itr, err := r.db.WALSegments(ctx, pos.Generation) - if err != nil { - return err - } - defer itr.Close() - // Group segments by index. var segments [][]WALSegmentInfo - for itr.Next() { - info := itr.WALSegment() + for r.itr.Next() { + info := r.itr.WALSegment() + if cmp, err := ComparePos(pos, info.Pos()); err != nil { return fmt.Errorf("compare pos: %w", err) } else if cmp == 1 { @@ -624,38 +639,39 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s // monitor runs in a separate goroutine and continuously replicates the DB. func (r *Replica) monitor(ctx context.Context) { - ticker := time.NewTicker(r.SyncInterval) - defer ticker.Stop() + timer := time.NewTimer(r.SyncInterval) + defer timer.Stop() - // Continuously check for new data to replicate. - ch := make(chan struct{}) - close(ch) - var notify <-chan struct{} = ch + for { + if err := r.Sync(ctx); ctx.Err() != nil { + return + } else if err != nil && err != ErrNoGeneration { + r.Logger.Printf("monitor error: %s", err) + } - for initial := true; ; initial = false { - // Enforce a minimum time between synchronization. - if !initial { + // Wait for a change to the WAL iterator. + if r.itr != nil { select { case <-ctx.Done(): return - case <-ticker.C: + case <-r.itr.NotifyCh(): } } - // Wait for changes to the database. + // Wait for the sync interval to collect additional changes. + timer.Reset(r.SyncInterval) select { case <-ctx.Done(): return - case <-notify: + case <-timer.C: } - // Fetch new notify channel before replicating data. - notify = r.db.WALNotify() - - // Synchronize the shadow wal into the replication directory. - if err := r.Sync(ctx); err != nil { - r.Logger.Printf("monitor error: %s", err) - continue + // Flush any additional notifications from the WAL iterator. 
+ if r.itr != nil { + select { + case <-r.itr.NotifyCh(): + default: + } } } } diff --git a/replica_client_test.go b/replica_client_test.go index fedf9f27..83117b28 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -489,8 +489,6 @@ func TestFindMaxIndexByGeneration(t *testing.T) { }) } -func TestRestoreSnapshot(t *testing.T) { t.Skip("TODO") } - func TestRestore(t *testing.T) { t.Run("OK", func(t *testing.T) { testDir := filepath.Join("testdata", "restore", "ok") From 1a630aed04dad0eb5bffc658348c5bde8eb3cf2a Mon Sep 17 00:00:00 2001 From: Campbell Vertesi Date: Fri, 11 Feb 2022 15:37:26 +0100 Subject: [PATCH 54/95] Add docker multiarch build and push to release Co-authored-by: Ben Johnson --- .github/workflows/release.docker.yml | 56 ++++++++++++++++++++++++++++ .github/workflows/release.linux.yml | 3 +- Dockerfile | 6 ++- 3 files changed, 62 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/release.docker.yml diff --git a/.github/workflows/release.docker.yml b/.github/workflows/release.docker.yml new file mode 100644 index 00000000..13219099 --- /dev/null +++ b/.github/workflows/release.docker.yml @@ -0,0 +1,56 @@ +on: + release: + types: + - published + pull_request: + types: + - opened + - synchronize + - reopened + branches-ignore: + - "dependabot/**" + +name: Release (Docker) +jobs: + docker: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - platform: linux/amd64 + - platform: linux/arm64 + + env: + VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" + + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-buildx-action@v1 + + - uses: docker/login-action@v1 + with: + username: benbjohnson + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - id: meta + uses: docker/metadata-action@v3 + with: + images: litestream/litestream + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha + type=sha,format=long + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - uses: docker/build-push-action@v2 + with: + context: . + push: true + platforms: ${{ matrix.platform }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + LITESTREAM_VERSION=${{ env.VERSION }} diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index aff5e841..d45c111e 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -1,7 +1,7 @@ on: release: types: - - created + - published pull_request: types: - opened @@ -132,4 +132,3 @@ jobs: run: sleep 60 && gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.run_id }} -f litestream_version=${{ github.sha }} env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} - diff --git a/Dockerfile b/Dockerfile index 677f27ff..c0dd0cc0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,15 @@ -FROM golang:1.16 as builder +FROM golang:1.17 as builder + WORKDIR /src/litestream COPY . . 
+ ARG LITESTREAM_VERSION=latest + RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg \ go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream + FROM alpine COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream ENTRYPOINT ["/usr/local/bin/litestream"] From fc42576e47a2e5c5151f4a53048e08f79134a5b9 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 15 Feb 2022 12:15:35 -0700 Subject: [PATCH 55/95] Add Docker arm/v7 to CI --- .github/workflows/release.docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.docker.yml b/.github/workflows/release.docker.yml index 13219099..38256697 100644 --- a/.github/workflows/release.docker.yml +++ b/.github/workflows/release.docker.yml @@ -19,6 +19,7 @@ jobs: include: - platform: linux/amd64 - platform: linux/arm64 + - platform: linux/arm/v7 env: VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" From fde17d0e62315a287566041a5d877199130f2089 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 15 Feb 2022 13:10:22 -0700 Subject: [PATCH 56/95] Upgrade dependencies --- go.mod | 4 ++-- go.sum | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 592c596a..32134ea7 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.16 require ( cloud.google.com/go/storage v1.20.0 github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/aws/aws-sdk-go v1.42.48 + github.com/aws/aws-sdk-go v1.42.53 github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.11 github.com/pierrec/lz4/v4 v4.1.14 @@ -14,6 +14,6 @@ require ( golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a - google.golang.org/api v0.67.0 + google.golang.org/api v0.68.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 23897fc6..37990216 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,9 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0 h1:rSUBvAyVwNJ5uQCKNJFMwPtTvJkfN38b6Pvb9zZoqJ8= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.2.0 h1:EKki8sSdvDU0OO9mAXGwPXOTOgPz2l08R0/IutDH11I= +cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.1.1 h1:4CapQyNFjiksks1/x7jsvsygFPhihslYk5GptIrlX68= @@ -77,8 +78,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 
-github.com/aws/aws-sdk-go v1.42.48 h1:8ZVBAsA9X2eCpSr/8SrWDk4BOT91wRdqxpAog875+K0= -github.com/aws/aws-sdk-go v1.42.48/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/aws/aws-sdk-go v1.42.53 h1:56T04NWcmc0ZVYFbUc6HdewDQ9iHQFlmS6hj96dRjJs= +github.com/aws/aws-sdk-go v1.42.53/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -484,7 +485,6 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a h1:ppl5mZgokTT8uPkmYOyEUmPTr3ypaKkg5eFOGrAmxxE= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= @@ -590,8 +590,8 @@ google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3h google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= -google.golang.org/api v0.67.0 h1:lYaaLa+x3VVUhtosaK9xihwQ9H9KRa557REHwwZ2orM= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.68.0 h1:9eJiHhwJKIYX6sX2fUZxQLi7pDRA/MYu8c12q6WbJik= +google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -663,9 +663,9 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 h1:0UVUC7VWA/mIU+5a4hVWH6xa234gLcRX8ZcrFKmWWKA= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e h1:hXl9hnyOkeznztYpYxVPAVZfPzcbO6Q0C+nLXodza8k= +google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= From 4027c87a02c99cdecf9fca4be0cccc8d255a04b7 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 15 Feb 2022 15:19:42 -0700 Subject: [PATCH 57/95] Fix Docker arch mismatch --- .github/workflows/release.docker.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/release.docker.yml b/.github/workflows/release.docker.yml index 38256697..89d3f68f 100644 --- a/.github/workflows/release.docker.yml +++ b/.github/workflows/release.docker.yml @@ -14,14 +14,8 @@ name: Release (Docker) jobs: docker: runs-on: ubuntu-latest - strategy: - matrix: - include: - - platform: linux/amd64 - - platform: linux/arm64 - - platform: linux/arm/v7 - env: + PLATFORMS: "${{ github.event_name == 'release' && 'linux/amd64,linux/arm64,linux/arm/v7' || 'linux/amd64,linux/arm64' }}" VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" steps: @@ -50,7 +44,7 @@ jobs: with: context: . push: true - platforms: ${{ matrix.platform }} + platforms: ${{ env.PLATFORMS }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | From 6f8cd5a9c4872fba47bdcd2380581f240c0505bb Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 18 Feb 2022 14:26:07 -0700 Subject: [PATCH 58/95] Configurable monitor-delay-interval The `monitor-delay-interval` has been added to the DB config so that users can change the time period between WAL checks after a file change notification has occurred. This can be useful to batch up changes in larger files in the shadow WAL or to reduce or eliminate the delay in propagating changes during read replication. Setting the interval to zero or less will disable it. --- cmd/litestream/main.go | 12 +++-- db.go | 34 ++++++++----- integration/cmd_test.go | 49 +++++++++++++++++++ .../replicate/high-load/litestream.yml | 1 - .../no-monitor-delay-interval/litestream.yml | 5 ++ .../testdata/replicate/ok/litestream.yml | 1 - .../testdata/replicate/resume/litestream.yml | 1 - 7 files changed, 84 insertions(+), 19 deletions(-) create mode 100644 integration/testdata/replicate/no-monitor-delay-interval/litestream.yml diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 490ba187..a02d8628 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -266,10 +266,11 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { // DBConfig represents the configuration for a single database. type DBConfig struct { - Path string `yaml:"path"` - CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` - MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` - MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` + Path string `yaml:"path"` + MonitorDelayInterval *time.Duration `yaml:"monitor-delay-interval"` + CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` + MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` + MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` Replicas []*ReplicaConfig `yaml:"replicas"` } @@ -289,6 +290,9 @@ func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) db := litestream.NewDB(path) // Override default database settings if specified in configuration. 
+ if dbc.MonitorDelayInterval != nil { + db.MonitorDelayInterval = *dbc.MonitorDelayInterval + } if dbc.CheckpointInterval != nil { db.CheckpointInterval = *dbc.CheckpointInterval } diff --git a/db.go b/db.go index 7821f62c..ff81c4eb 100644 --- a/db.go +++ b/db.go @@ -27,15 +27,13 @@ import ( // Default DB settings. const ( - DefaultCheckpointInterval = 1 * time.Minute + DefaultMonitorDelayInterval = 10 * time.Millisecond + DefaultCheckpointInterval = 1 * time.Minute + DefaultMinCheckpointPageN = 1000 DefaultMaxCheckpointPageN = 10000 ) -// MonitorDelayInterval is the time Litestream will wait after receiving a file -// change notification before processing the WAL file for changes. -const MonitorDelayInterval = 10 * time.Millisecond - // MaxIndex is the maximum possible WAL index. // If this index is reached then a new generation will be started. const MaxIndex = 0x7FFFFFFF @@ -98,6 +96,11 @@ type DB struct { // unbounded if there are always read transactions occurring. MaxCheckpointPageN int + // Time after receiving change notification before reading next WAL segment. + // Used for batching changes into fewer files instead of every transaction + // creating its own file. + MonitorDelayInterval time.Duration + // Time between automatic checkpoints in the WAL. This is done to allow // more fine-grained WAL files so that restores can be performed with // better precision. @@ -118,9 +121,10 @@ func NewDB(path string) *DB { itrs: make(map[*FileWALSegmentIterator]struct{}), - MinCheckpointPageN: DefaultMinCheckpointPageN, - MaxCheckpointPageN: DefaultMaxCheckpointPageN, - CheckpointInterval: DefaultCheckpointInterval, + MinCheckpointPageN: DefaultMinCheckpointPageN, + MaxCheckpointPageN: DefaultMaxCheckpointPageN, + MonitorDelayInterval: DefaultMonitorDelayInterval, + CheckpointInterval: DefaultCheckpointInterval, Logger: log.New(LogWriter, fmt.Sprintf("%s: ", logPrefixPath(path)), LogFlags), } @@ -1497,8 +1501,12 @@ func (db *DB) execCheckpoint(mode string) (err error) { // monitor runs in a separate goroutine and monitors the database & WAL. func (db *DB) monitor() { - timer := time.NewTimer(MonitorDelayInterval) - defer timer.Stop() + var timer *time.Timer + + if db.MonitorDelayInterval > 0 { + timer := time.NewTimer(db.MonitorDelayInterval) + defer timer.Stop() + } for { // Wait for a file change notification from the file system. @@ -1509,8 +1517,10 @@ func (db *DB) monitor() { } // Wait for small delay before processing changes. - timer.Reset(MonitorDelayInterval) - <-timer.C + if timer != nil { + timer.Reset(db.MonitorDelayInterval) + <-timer.C + } // Clear any additional change notifications that occurred during delay. select { diff --git a/integration/cmd_test.go b/integration/cmd_test.go index a663f9c2..a70ad536 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -215,6 +215,55 @@ func TestCmd_Replicate_ResumeWithNewGeneration(t *testing.T) { restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) } +// Ensure the monitor interval can be turned off. 
+func TestCmd_Replicate_NoMonitorDelayInterval(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "no-monitor-delay-interval"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + time.Sleep(1 * time.Second) + + // Execute writes periodically. + for i := 0; i < 10; i++ { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd, stdout) + + // Ensure signal and shutdown are logged. + if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) { + t.Fatal("missing log output for signal received") + } else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) { + t.Fatal("missing log output for shut down") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + // Ensure the default configuration works with heavy write load. 
func TestCmd_Replicate_HighLoad(t *testing.T) { if testing.Short() { diff --git a/integration/testdata/replicate/high-load/litestream.yml b/integration/testdata/replicate/high-load/litestream.yml index 26fb1195..5e116355 100644 --- a/integration/testdata/replicate/high-load/litestream.yml +++ b/integration/testdata/replicate/high-load/litestream.yml @@ -3,5 +3,4 @@ dbs: replicas: - path: $LITESTREAM_TEMPDIR/replica - monitor-interval: 100ms max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml b/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml new file mode 100644 index 00000000..e597b313 --- /dev/null +++ b/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + monitor-delay-interval: 0 + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/ok/litestream.yml b/integration/testdata/replicate/ok/litestream.yml index 26fb1195..5e116355 100644 --- a/integration/testdata/replicate/ok/litestream.yml +++ b/integration/testdata/replicate/ok/litestream.yml @@ -3,5 +3,4 @@ dbs: replicas: - path: $LITESTREAM_TEMPDIR/replica - monitor-interval: 100ms max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/resume/litestream.yml b/integration/testdata/replicate/resume/litestream.yml index 0bdd84ed..494ece05 100644 --- a/integration/testdata/replicate/resume/litestream.yml +++ b/integration/testdata/replicate/resume/litestream.yml @@ -3,5 +3,4 @@ dbs: replicas: - path: $LITESTREAM_TEMPDIR/replica - monitor-interval: 100ms max-checkpoint-page-count: 10 From 4898fc2fc1649bffffb5b72f80464ed3d7f134d0 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 18 Feb 2022 14:34:56 -0700 Subject: [PATCH 59/95] Remove Docker linux/arm64 for PR builds --- .github/workflows/release.docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.docker.yml b/.github/workflows/release.docker.yml index 89d3f68f..5b56f540 100644 --- a/.github/workflows/release.docker.yml +++ b/.github/workflows/release.docker.yml @@ -15,7 +15,7 @@ jobs: docker: runs-on: ubuntu-latest env: - PLATFORMS: "${{ github.event_name == 'release' && 'linux/amd64,linux/arm64,linux/arm/v7' || 'linux/amd64,linux/arm64' }}" + PLATFORMS: "${{ github.event_name == 'release' && 'linux/amd64,linux/arm64,linux/arm/v7' || 'linux/amd64' }}" VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" steps: From a090706421488331bf20d8f5f2c508a86edfd804 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 19 Feb 2022 07:46:01 -0700 Subject: [PATCH 60/95] Implement live read replication This commit adds an http server and client for streaming snapshots and WAL pages from an upstream Litestream primary to a read-only replica. 
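A read replica can also be wired up in code rather than through the YAML config. The sketch below mirrors what NewDBFromConfigWithPath does in this patch when an `upstream` block is present: it attaches an HTTP stream client to the DB so the DB applies changes from the primary instead of replicating local writes. The addresses and database paths are placeholders, and the primary is assumed to be running `litestream replicate` with an `addr` configured so its HTTP server serves the stream.

package example

import (
	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/http"
)

// openReadReplica opens a local database that continuously applies
// snapshots and WAL segments streamed from an upstream primary.
func openReadReplica() (*litestream.DB, error) {
	db := litestream.NewDB("/var/lib/app/db") // local copy; treat as read-only

	// Receive live changes from the primary's HTTP server instead of
	// monitoring the local WAL. Both arguments are placeholders: the
	// primary's address and the database path registered on the primary.
	db.StreamClient = http.NewClient("http://primary:9090", "/var/lib/app/db")

	if err := db.Open(); err != nil {
		return nil, err
	}
	return db, nil
}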
--- cmd/litestream/main.go | 15 + cmd/litestream/replicate.go | 39 +- db.go | 408 ++++++++++++++++-- db_bsd.go | 21 + db_linux.go | 18 + http/http.go | 373 ++++++++++++++++ integration/cmd_test.go | 64 +++ .../testdata/replicate/http/litestream.0.yml | 5 + .../testdata/replicate/http/litestream.1.yml | 5 + internal/internal.go | 15 + litestream.go | 71 +++ testdata/wal-writer/live/README.md | 19 + testdata/wal-writer/live/db | Bin 0 -> 8192 bytes testdata/wal-writer/live/db-shm | Bin 0 -> 32768 bytes testdata/wal-writer/live/db-wal | Bin 0 -> 4152 bytes testdata/wal-writer/static/README.md | 26 ++ testdata/wal-writer/static/db-wal | Bin 0 -> 12392 bytes wal_writer.go | 103 +++++ wal_writer_test.go | 116 +++++ 19 files changed, 1241 insertions(+), 57 deletions(-) create mode 100644 db_bsd.go create mode 100644 db_linux.go create mode 100644 http/http.go create mode 100644 integration/testdata/replicate/http/litestream.0.yml create mode 100644 integration/testdata/replicate/http/litestream.1.yml create mode 100644 testdata/wal-writer/live/README.md create mode 100644 testdata/wal-writer/live/db create mode 100644 testdata/wal-writer/live/db-shm create mode 100644 testdata/wal-writer/live/db-wal create mode 100644 testdata/wal-writer/static/README.md create mode 100644 testdata/wal-writer/static/db-wal create mode 100644 wal_writer.go create mode 100644 wal_writer_test.go diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index a02d8628..65eb6af0 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -23,6 +23,7 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/http" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" _ "github.com/mattn/go-sqlite3" @@ -267,6 +268,7 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { // DBConfig represents the configuration for a single database. type DBConfig struct { Path string `yaml:"path"` + Upstream UpstreamConfig `yaml:"upstream"` MonitorDelayInterval *time.Duration `yaml:"monitor-delay-interval"` CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` @@ -289,6 +291,14 @@ func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) // Initialize database with given path. db := litestream.NewDB(path) + // Attach upstream HTTP client if specified. + if upstreamURL := dbc.Upstream.URL; upstreamURL != "" { + if dbc.Upstream.Path == "" { + return nil, fmt.Errorf("upstream path required") + } + db.StreamClient = http.NewClient(upstreamURL, dbc.Upstream.Path) + } + // Override default database settings if specified in configuration. if dbc.MonitorDelayInterval != nil { db.MonitorDelayInterval = *dbc.MonitorDelayInterval @@ -315,6 +325,11 @@ func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) return db, nil } +type UpstreamConfig struct { + URL string `yaml:"url"` + Path string `yaml:"path"` +} + // ReplicaConfig represents the configuration for a single replica in a database. 
type ReplicaConfig struct { Type string `yaml:"type"` // "file", "s3" diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index e0ae7bd1..284fda7f 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -6,19 +6,16 @@ import ( "fmt" "io" "log" - "net" - "net/http" - _ "net/http/pprof" "os" "os/exec" "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/http" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" "github.com/mattn/go-shellwords" - "github.com/prometheus/client_golang/prometheus/promhttp" ) // ReplicateCommand represents a command that continuously replicates SQLite databases. @@ -35,7 +32,8 @@ type ReplicateCommand struct { Config Config - server *litestream.Server + server *litestream.Server + httpServer *http.Server } // NewReplicateCommand returns a new instance of ReplicateCommand. @@ -143,22 +141,12 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { } } - // Serve metrics over HTTP if enabled. + // Serve HTTP if enabled. if c.Config.Addr != "" { - hostport := c.Config.Addr - if host, port, _ := net.SplitHostPort(c.Config.Addr); port == "" { - return fmt.Errorf("must specify port for bind address: %q", c.Config.Addr) - } else if host == "" { - hostport = net.JoinHostPort("localhost", port) + c.httpServer = http.NewServer(c.server, c.Config.Addr) + if err := c.httpServer.Open(); err != nil { + return fmt.Errorf("cannot start http server: %w", err) } - - log.Printf("serving metrics on http://%s/metrics", hostport) - go func() { - http.Handle("/metrics", promhttp.Handler()) - if err := http.ListenAndServe(c.Config.Addr, nil); err != nil { - log.Printf("cannot start metrics server: %s", err) - } - }() } // Parse exec commands args & start subprocess. @@ -183,10 +171,17 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { return nil } -// Close closes all open databases. +// Close closes the HTTP server & all open databases. func (c *ReplicateCommand) Close() (err error) { - if e := c.server.Close(); e != nil && err == nil { - err = e + if c.httpServer != nil { + if e := c.httpServer.Close(); e != nil && err == nil { + err = e + } + } + if c.server != nil { + if e := c.server.Close(); e != nil && err == nil { + err = e + } } return err } diff --git a/db.go b/db.go index ff81c4eb..06957e32 100644 --- a/db.go +++ b/db.go @@ -23,6 +23,7 @@ import ( "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/sync/errgroup" ) // Default DB settings. @@ -68,7 +69,7 @@ type DB struct { ctx context.Context cancel func() - wg sync.WaitGroup + g errgroup.Group // Metrics dbSizeGauge prometheus.Gauge @@ -83,6 +84,11 @@ type DB struct { checkpointErrorNCounterVec *prometheus.CounterVec checkpointSecondsCounterVec *prometheus.CounterVec + // Client used to receive live, upstream changes. If specified, then + // DB should be used as read-only as local changes will conflict with + // upstream changes. + StreamClient StreamClient + // Minimum threshold of WAL size, in pages, before a passive checkpoint. // A passive checkpoint will attempt a checkpoint but fail if there are // active transactions occurring at the same time. @@ -161,6 +167,11 @@ func (db *DB) WALPath() string { return db.path + "-wal" } +// SHMPath returns the path to the database's shared memory file. 
+func (db *DB) SHMPath() string { + return db.path + "-shm" +} + // MetaPath returns the path to the database metadata. func (db *DB) MetaPath() string { dir, file := filepath.Split(db.path) @@ -179,6 +190,12 @@ func (db *DB) GenerationPath(generation string) string { return filepath.Join(db.MetaPath(), "generations", generation) } +// PositionPath returns the path of the file that stores the current position. +// This file is only used to communicate state to external processes. +func (db *DB) PositionPath() string { + return filepath.Join(db.MetaPath(), "position") +} + // ShadowWALDir returns the path of the shadow wal directory. // Panics if generation is blank. func (db *DB) ShadowWALDir(generation string) string { @@ -399,9 +416,10 @@ func (db *DB) Open() (err error) { return fmt.Errorf("cannot remove tmp files: %w", err) } - // Start monitoring SQLite database in a separate goroutine. - db.wg.Add(1) - go func() { defer db.wg.Done(); db.monitor() }() + // If an upstream client is specified, then we should simply stream changes + // into the database. If it is not specified, then we should monitor the + // database for local changes and replicate them out. + db.g.Go(func() error { return db.monitor(db.ctx) }) return nil } @@ -410,7 +428,9 @@ func (db *DB) Open() (err error) { // and closes the database. func (db *DB) Close() (err error) { db.cancel() - db.wg.Wait() + if e := db.g.Wait(); e != nil && err == nil { + err = e + } // Start a new context for shutdown since we canceled the DB context. ctx := context.Background() @@ -484,8 +504,8 @@ func (db *DB) UpdatedAt() (time.Time, error) { return t, nil } -// init initializes the connection to the database. -// Skipped if already initialized or if the database file does not exist. +// init initializes the connection to the database. Skipped if already +// initialized or if the database file does not exist. func (db *DB) init() (err error) { // Exit if already initialized. if db.db != nil { @@ -493,17 +513,15 @@ func (db *DB) init() (err error) { } // Exit if no database file exists. - fi, err := os.Stat(db.path) - if os.IsNotExist(err) { + if _, err := os.Stat(db.path); os.IsNotExist(err) { return nil } else if err != nil { return err } - db.fileMode = fi.Mode() - db.uid, db.gid = internal.Fileinfo(fi) // Obtain permissions for parent directory. - if fi, err = os.Stat(filepath.Dir(db.path)); err != nil { + fi, err := os.Stat(filepath.Dir(db.path)) + if err != nil { return err } db.dirMode = fi.Mode() @@ -517,22 +535,6 @@ func (db *DB) init() (err error) { return err } - // Open long-running database file descriptor. Required for non-OFD locks. - if db.f, err = os.Open(db.path); err != nil { - return fmt.Errorf("open db file descriptor: %w", err) - } - - // Ensure database is closed if init fails. - // Initialization can retry on next sync. - defer func() { - if err != nil { - _ = db.releaseReadLock() - db.db.Close() - db.f.Close() - db.db, db.f = nil, nil - } - }() - // Enable WAL and ensure it is set. New mode should be returned on success: // https://www.sqlite.org/pragma.html#pragma_journal_mode var mode string @@ -559,6 +561,30 @@ func (db *DB) init() (err error) { return fmt.Errorf("create _litestream_lock table: %w", err) } + // Open long-running database file descriptor. Required for non-OFD locks. + if db.f, err = os.Open(db.path); err != nil { + return fmt.Errorf("open db file descriptor: %w", err) + } + + // Ensure database is closed if init fails. + // Initialization can retry on next sync. 
+ defer func() { + if err != nil { + _ = db.releaseReadLock() + db.db.Close() + db.f.Close() + db.db, db.f = nil, nil + } + }() + + // Obtain file info once we know the database exists. + fi, err = os.Stat(db.path) + if err != nil { + return fmt.Errorf("init file stat: %w", err) + } + db.fileMode = fi.Mode() + db.uid, db.gid = internal.Fileinfo(fi) + // Start a long-running read transaction to prevent other transactions // from checkpointing. if err := db.acquireReadLock(); err != nil { @@ -603,6 +629,76 @@ func (db *DB) init() (err error) { return nil } +// initReplica initializes a new database file as a replica of an upstream database. +func (db *DB) initReplica(pageSize int) (err error) { + // Exit if already initialized. + if db.db != nil { + return nil + } + + // Obtain permissions for parent directory. + fi, err := os.Stat(filepath.Dir(db.path)) + if err != nil { + return err + } + db.dirMode = fi.Mode() + + dsn := db.path + dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) + + // Connect to SQLite database. Use the driver registered with a hook to + // prevent WAL files from being removed. + if db.db, err = sql.Open("litestream-sqlite3", dsn); err != nil { + return err + } + + // Initialize database file if it doesn't exist. It doesn't matter what we + // store in it as it will be erased by the replication. We just need to + // ensure a WAL file is created and there is at least a page in the database. + if _, err := os.Stat(db.path); os.IsNotExist(err) { + if _, err := db.db.ExecContext(db.ctx, fmt.Sprintf(`PRAGMA page_size = %d`, pageSize)); err != nil { + return fmt.Errorf("set page size: %w", err) + } + + var mode string + if err := db.db.QueryRow(`PRAGMA journal_mode = wal`).Scan(&mode); err != nil { + return err + } else if mode != "wal" { + return fmt.Errorf("enable wal failed, mode=%q", mode) + } + + // TODO: Set page size. + + if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream (id INTEGER)`); err != nil { + return fmt.Errorf("create _litestream table: %w", err) + } else if _, err := db.db.ExecContext(db.ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + return fmt.Errorf("create _litestream table: %w", err) + } + } + + // Obtain file info once we know the database exists. + fi, err = os.Stat(db.path) + if err != nil { + return fmt.Errorf("init file stat: %w", err) + } + db.fileMode = fi.Mode() + db.uid, db.gid = internal.Fileinfo(fi) + + // Verify page size matches. + if err := db.db.QueryRowContext(db.ctx, `PRAGMA page_size;`).Scan(&db.pageSize); err != nil { + return fmt.Errorf("read page size: %w", err) + } else if db.pageSize != pageSize { + return fmt.Errorf("page size mismatch: %d <> %d", db.pageSize, pageSize) + } + + // Ensure meta directory structure exists. + if err := internal.MkdirAll(db.MetaPath(), db.dirMode, db.uid, db.gid); err != nil { + return err + } + + return nil +} + func (db *DB) clearGeneration(ctx context.Context) error { if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { return err @@ -1278,6 +1374,11 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error return err } + // Write position to file so other processes can read it. 
+ if err := db.writePositionFile(pos); err != nil { + return fmt.Errorf("write position file: %w", err) + } + // Generate info := WALSegmentInfo{ Generation: pos.Generation, @@ -1308,6 +1409,11 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error return nil } +// writePositionFile writes pos as the current position. +func (db *DB) writePositionFile(pos Pos) error { + return internal.WriteFile(db.PositionPath(), []byte(pos.String()+"\n"), db.fileMode, db.uid, db.gid) +} + // WALSegments returns an iterator over all available WAL files for a generation. func (db *DB) WALSegments(ctx context.Context, generation string) (*FileWALSegmentIterator, error) { db.mu.Lock() @@ -1499,20 +1605,26 @@ func (db *DB) execCheckpoint(mode string) (err error) { return nil } -// monitor runs in a separate goroutine and monitors the database & WAL. -func (db *DB) monitor() { - var timer *time.Timer +func (db *DB) monitor(ctx context.Context) error { + if db.StreamClient != nil { + return db.monitorUpstream(ctx) + } + return db.monitorLocal(ctx) +} +// monitor runs in a separate goroutine and monitors the local database & WAL. +func (db *DB) monitorLocal(ctx context.Context) error { + var timer *time.Timer if db.MonitorDelayInterval > 0 { - timer := time.NewTimer(db.MonitorDelayInterval) + timer = time.NewTimer(db.MonitorDelayInterval) defer timer.Stop() } for { // Wait for a file change notification from the file system. select { - case <-db.ctx.Done(): - return + case <-ctx.Done(): + return nil case <-db.notifyCh: } @@ -1528,12 +1640,193 @@ func (db *DB) monitor() { default: } - if err := db.Sync(db.ctx); err != nil && !errors.Is(err, context.Canceled) { + if err := db.Sync(ctx); err != nil && !errors.Is(err, context.Canceled) { db.Logger.Printf("sync error: %s", err) } } } +// monitorUpstream runs in a separate goroutine and streams data into the local DB. +func (db *DB) monitorUpstream(ctx context.Context) error { + for { + if err := db.stream(ctx); err != nil { + if ctx.Err() != nil { + return nil + } + db.Logger.Printf("stream error, retrying: %s", err) + } + + // Delay before retrying stream. + select { + case <-ctx.Done(): + return nil + case <-time.After(1 * time.Second): + } + } +} + +// stream initializes the local database and continuously streams new upstream data. +func (db *DB) stream(ctx context.Context) error { + // Continuously stream and apply records from client. + sr, err := db.StreamClient.Stream(ctx) + if err != nil { + return fmt.Errorf("stream connect: %w", err) + } + defer sr.Close() + + // TODO: Determine page size of upstream database before creating local. + const pageSize = 4096 + + // Initialize the database and create it if it doesn't exist. + if err := db.initReplica(pageSize); err != nil { + return fmt.Errorf("init replica: %w", err) + } + + for { + hdr, err := sr.Next() + if err != nil { + return err + } + + switch hdr.Type { + case StreamRecordTypeSnapshot: + if err := db.streamSnapshot(ctx, hdr, sr); err != nil { + return fmt.Errorf("snapshot: %w", err) + } + case StreamRecordTypeWALSegment: + if err := db.streamWALSegment(ctx, hdr, sr); err != nil { + return fmt.Errorf("wal segment: %w", err) + } + default: + return fmt.Errorf("invalid stream record type: 0x%02x", hdr.Type) + } + } +} + +// streamSnapshot reads the snapshot into the WAL and applies it to the main database. +func (db *DB) streamSnapshot(ctx context.Context, hdr *StreamRecordHeader, r io.Reader) error { + // Truncate WAL file. 
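[Editor's aside on the position file written above: since it exists only so that external processes can observe replication progress, the natural counterpart is a tiny out-of-process reader. A hedged sketch that assumes only what the diff states, namely that the file lives at `db.MetaPath() + "/position"` and holds the position string plus a trailing newline; the concrete path below is a placeholder.]

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// Placeholder path: the real location is db.MetaPath() + "/position",
	// the metadata directory Litestream keeps alongside the database file.
	const positionPath = "/var/lib/app/.db-litestream/position"

	b, err := os.ReadFile(positionPath)
	if err != nil {
		log.Fatalf("read position file: %v", err)
	}

	// Treat the position string itself as opaque and just report it.
	fmt.Println("current replication position:", strings.TrimSpace(string(b)))
}
```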
+ if _, err := db.db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + return fmt.Errorf("truncate: %w", err) + } + + // Determine total page count. + pageN := int(hdr.Size / int64(db.pageSize)) + + ww := NewWALWriter(db.WALPath(), db.fileMode, db.pageSize) + if err := ww.Open(); err != nil { + return fmt.Errorf("open wal writer: %w", err) + } + defer func() { _ = ww.Close() }() + + if err := ww.WriteHeader(); err != nil { + return fmt.Errorf("write wal header: %w", err) + } + + // Iterate over pages + buf := make([]byte, db.pageSize) + for pgno := uint32(1); ; pgno++ { + // Read snapshot page into a buffer. + if _, err := io.ReadFull(r, buf); err == io.EOF { + break + } else if err != nil { + return fmt.Errorf("read snapshot page %d: %w", pgno, err) + } + + // Issue a commit flag when the last page is reached. + var commit uint32 + if pgno == uint32(pageN) { + commit = uint32(pageN) + } + + // Write page into WAL frame. + if err := ww.WriteFrame(pgno, commit, buf); err != nil { + return fmt.Errorf("write wal frame: %w", err) + } + } + + // Close WAL file writer. + if err := ww.Close(); err != nil { + return fmt.Errorf("close wal writer: %w", err) + } + + // Invalidate WAL index. + if err := invalidateSHMFile(db.path); err != nil { + return fmt.Errorf("invalidate shm file: %w", err) + } + + // Write position to file so other processes can read it. + if err := db.writePositionFile(hdr.Pos()); err != nil { + return fmt.Errorf("write position file: %w", err) + } + + db.Logger.Printf("snapshot applied") + + return nil +} + +// streamWALSegment rewrites a WAL segment into the local WAL and applies it to the main database. +func (db *DB) streamWALSegment(ctx context.Context, hdr *StreamRecordHeader, r io.Reader) error { + // Decompress incoming segment + zr := lz4.NewReader(r) + + // Drop WAL header if starting from offset zero. + if hdr.Offset == 0 { + if _, err := io.CopyN(io.Discard, zr, WALHeaderSize); err != nil { + return fmt.Errorf("read wal header: %w", err) + } + } + + ww := NewWALWriter(db.WALPath(), db.fileMode, db.pageSize) + if err := ww.Open(); err != nil { + return fmt.Errorf("open wal writer: %w", err) + } + defer func() { _ = ww.Close() }() + + if err := ww.WriteHeader(); err != nil { + return fmt.Errorf("write wal header: %w", err) + } + + // Iterate over incoming WAL pages. + buf := make([]byte, WALFrameHeaderSize+db.pageSize) + for i := 0; ; i++ { + // Read snapshot page into a buffer. + if _, err := io.ReadFull(zr, buf); err == io.EOF { + break + } else if err != nil { + return fmt.Errorf("read wal frame %d: %w", i, err) + } + + // Read page number & commit field. + pgno := binary.BigEndian.Uint32(buf[0:]) + commit := binary.BigEndian.Uint32(buf[4:]) + + // Write page into WAL frame. + if err := ww.WriteFrame(pgno, commit, buf[WALFrameHeaderSize:]); err != nil { + return fmt.Errorf("write wal frame: %w", err) + } + } + + // Close WAL file writer. + if err := ww.Close(); err != nil { + return fmt.Errorf("close wal writer: %w", err) + } + + // Invalidate WAL index. + if err := invalidateSHMFile(db.path); err != nil { + return fmt.Errorf("invalidate shm file: %w", err) + } + + // Write position to file so other processes can read it. + if err := db.writePositionFile(hdr.Pos()); err != nil { + return fmt.Errorf("write position file: %w", err) + } + + db.Logger.Printf("wal segment applied: %s", hdr.Pos().String()) + + return nil +} + // ApplyWAL performs a truncating checkpoint on the given database. 
func ApplyWAL(ctx context.Context, dbPath, walPath string) error { // Copy WAL file from it's staging path to the correct "-wal" location. @@ -1681,6 +1974,51 @@ func logPrefixPath(path string) string { return path } +// invalidateSHMFile clears the iVersion field of the -shm file in order that +// the next transaction will rebuild it. +func invalidateSHMFile(dbPath string) error { + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + return fmt.Errorf("reopen db: %w", err) + } + defer func() { _ = db.Close() }() + + if _, err := db.Exec(`PRAGMA wal_checkpoint(PASSIVE)`); err != nil { + return fmt.Errorf("passive checkpoint: %w", err) + } + + f, err := os.OpenFile(dbPath+"-shm", os.O_RDWR, 0666) + if err != nil { + return fmt.Errorf("open shm index: %w", err) + } + defer f.Close() + + buf := make([]byte, WALIndexHeaderSize) + if _, err := io.ReadFull(f, buf); err != nil { + return fmt.Errorf("read shm index: %w", err) + } + + // Invalidate "isInit" fields. + buf[12], buf[60] = 0, 0 + + // Rewrite header. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("seek shm index: %w", err) + } else if _, err := f.Write(buf); err != nil { + return fmt.Errorf("overwrite shm index: %w", err) + } else if err := f.Close(); err != nil { + return fmt.Errorf("close shm index: %w", err) + } + + // Truncate WAL file again. + var row [3]int + if err := db.QueryRow(`PRAGMA wal_checkpoint(TRUNCATE)`).Scan(&row[0], &row[1], &row[2]); err != nil { + return fmt.Errorf("truncate: %w", err) + } + + return nil +} + // A marker error to indicate that a restart checkpoint could not verify // continuity between WAL indices and a new generation should be started. var errRestartGeneration = errors.New("restart generation") diff --git a/db_bsd.go b/db_bsd.go new file mode 100644 index 00000000..9c0c6bba --- /dev/null +++ b/db_bsd.go @@ -0,0 +1,21 @@ +//go:build !linux + +package litestream + +import ( + "io" + "os" +) + +// WithFile executes fn with a file handle for the main database file. +// On Linux, this is a unique file handle for each call. On non-Linux +// systems, the file handle is shared because of lock semantics. +func (db *DB) WithFile(fn func(f *os.File) error) error { + db.mu.Lock() + defer db.mu.Unlock() + + if _, err := db.f.Seek(0, io.SeekStart); err != nil { + return err + } + return fn(db.f) +} diff --git a/db_linux.go b/db_linux.go new file mode 100644 index 00000000..b6691090 --- /dev/null +++ b/db_linux.go @@ -0,0 +1,18 @@ +//go:build linux + +package litestream + +import "os" + +// WithFile executes fn with a file handle for the main database file. +// On Linux, this is a unique file handle for each call. On non-Linux +// systems, the file handle is shared because of lock semantics. +func (db *DB) WithFile(fn func(f *os.File) error) error { + f, err := os.Open(db.path) + if err != nil { + return err + } + defer f.Close() + + return fn(f) +} diff --git a/http/http.go b/http/http.go new file mode 100644 index 00000000..fc35f0fe --- /dev/null +++ b/http/http.go @@ -0,0 +1,373 @@ +package http + +import ( + "context" + "fmt" + "io" + "log" + "net" + "net/http" + httppprof "net/http/pprof" + "net/url" + "os" + "strings" + + "github.com/benbjohnson/litestream" + "github.com/prometheus/client_golang/prometheus/promhttp" + "golang.org/x/sync/errgroup" +) + +// Server represents an HTTP API server for Litestream. 
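[Editor's aside on the magic offsets in `invalidateSHMFile` above, which the live WAL-writer test later in this patch also uses: in my reading of the SQLite wal-index format (not something this patch defines), the `-shm` file begins with two back-to-back 48-byte copies of the wal-index header, the `isInit` flag sits at byte 12 of each copy, and the remaining bytes up to the 136-byte `WALIndexHeaderSize` added to litestream.go hold checkpoint bookkeeping. Clearing offsets 12 and 60 therefore forces readers to rebuild the index. A small sketch that only inspects those flags:]

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// Assumed SQLite wal-index layout: two identical 48-byte header copies at
// the start of the -shm file, with isInit at byte 12 of each copy.
const (
	walIndexHdrCopySize = 48
	isInitOffset        = 12
)

func main() {
	// Illustrative path only.
	buf, err := os.ReadFile("db-shm")
	if err != nil {
		log.Fatal(err)
	}
	if len(buf) < 2*walIndexHdrCopySize {
		log.Fatalf("shm file too short: %d bytes", len(buf))
	}

	for i := 0; i < 2; i++ {
		off := i*walIndexHdrCopySize + isInitOffset // 12, then 60
		fmt.Printf("header copy %d: isInit=%d (offset %d)\n", i, buf[off], off)
	}
}
```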
+type Server struct { + ln net.Listener + closed bool + + httpServer *http.Server + promHandler http.Handler + + addr string + server *litestream.Server + + g errgroup.Group + + Logger *log.Logger +} + +func NewServer(server *litestream.Server, addr string) *Server { + s := &Server{ + addr: addr, + server: server, + Logger: log.New(os.Stderr, "http: ", litestream.LogFlags), + } + + s.promHandler = promhttp.Handler() + s.httpServer = &http.Server{ + Handler: http.HandlerFunc(s.serveHTTP), + } + return s +} + +func (s *Server) Open() (err error) { + if s.ln, err = net.Listen("tcp", s.addr); err != nil { + return err + } + + s.g.Go(func() error { + if err := s.httpServer.Serve(s.ln); err != nil && !s.closed { + return err + } + return nil + }) + + return nil +} + +func (s *Server) Close() (err error) { + s.closed = true + + if s.ln != nil { + if e := s.ln.Close(); e != nil && err == nil { + err = e + } + } + + if e := s.g.Wait(); e != nil && err == nil { + err = e + } + return err +} + +// Port returns the port the listener is running on. +func (s *Server) Port() int { + if s.ln == nil { + return 0 + } + return s.ln.Addr().(*net.TCPAddr).Port +} + +// URL returns the full base URL for the running server. +func (s *Server) URL() string { + host, _, _ := net.SplitHostPort(s.addr) + if host == "" { + host = "localhost" + } + return fmt.Sprintf("http://%s", net.JoinHostPort(host, fmt.Sprint(s.Port()))) +} + +func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/debug/pprof") { + switch r.URL.Path { + case "/debug/pprof/cmdline": + httppprof.Cmdline(w, r) + case "/debug/pprof/profile": + httppprof.Profile(w, r) + case "/debug/pprof/symbol": + httppprof.Symbol(w, r) + case "/debug/pprof/trace": + httppprof.Trace(w, r) + default: + httppprof.Index(w, r) + } + return + } + + switch r.URL.Path { + case "/metrics": + s.promHandler.ServeHTTP(w, r) + + case "/stream": + switch r.Method { + case http.MethodGet: + s.handleGetStream(w, r) + default: + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + default: + http.NotFound(w, r) + } +} + +func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + + // TODO: Listen for all databases matching query criteria. + path := q.Get("path") + if path == "" { + http.Error(w, "Database name required", http.StatusBadRequest) + return + } + db := s.server.DB(path) + if db == nil { + http.Error(w, "Database not found", http.StatusNotFound) + return + } + + // TODO: Restart stream from a previous position, if specified. + + // Determine starting position. + pos := db.Pos() + if pos.Generation == "" { + http.Error(w, "No generation available", http.StatusServiceUnavailable) + return + } + pos.Offset = 0 + + s.Logger.Printf("stream connected @ %s", pos) + defer s.Logger.Printf("stream disconnected") + + // Obtain iterator before snapshot so we don't miss any WAL segments. + itr, err := db.WALSegments(r.Context(), pos.Generation) + if err != nil { + http.Error(w, fmt.Sprintf("Cannot obtain WAL iterator: %s", err), http.StatusInternalServerError) + return + } + defer itr.Close() + + // Write snapshot to response body. + if err := db.WithFile(func(f *os.File) error { + fi, err := f.Stat() + if err != nil { + return err + } + + // Write snapshot header with current position & size. 
+ hdr := litestream.StreamRecordHeader{ + Type: litestream.StreamRecordTypeSnapshot, + Generation: pos.Generation, + Index: pos.Index, + Size: fi.Size(), + } + if buf, err := hdr.MarshalBinary(); err != nil { + return fmt.Errorf("marshal snapshot stream record header: %w", err) + } else if _, err := w.Write(buf); err != nil { + return fmt.Errorf("write snapshot stream record header: %w", err) + } + + if _, err := io.CopyN(w, f, fi.Size()); err != nil { + return fmt.Errorf("copy snapshot: %w", err) + } + + return nil + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Flush after snapshot has been written. + w.(http.Flusher).Flush() + + for { + // Wait for notification of new entries. + select { + case <-r.Context().Done(): + return + case <-itr.NotifyCh(): + } + + for itr.Next() { + info := itr.WALSegment() + + // Skip any segments before our initial position. + if cmp, err := litestream.ComparePos(info.Pos(), pos); err != nil { + s.Logger.Printf("pos compare: %s", err) + return + } else if cmp == -1 { + continue + } + + hdr := litestream.StreamRecordHeader{ + Type: litestream.StreamRecordTypeWALSegment, + Flags: 0, + Generation: info.Generation, + Index: info.Index, + Offset: info.Offset, + Size: info.Size, + } + + // Write record header. + data, err := hdr.MarshalBinary() + if err != nil { + s.Logger.Printf("marshal WAL segment stream record header: %s", err) + return + } else if _, err := w.Write(data); err != nil { + s.Logger.Printf("write WAL segment stream record header: %s", err) + return + } + + // Copy WAL segment data to writer. + if err := func() error { + rd, err := db.WALSegmentReader(r.Context(), info.Pos()) + if err != nil { + return fmt.Errorf("cannot fetch wal segment reader: %w", err) + } + defer rd.Close() + + if _, err := io.CopyN(w, rd, hdr.Size); err != nil { + return fmt.Errorf("cannot copy wal segment: %w", err) + } + return nil + }(); err != nil { + log.Print(err) + return + } + + // Flush after WAL segment has been written. + w.(http.Flusher).Flush() + } + if itr.Err() != nil { + s.Logger.Printf("wal iterator error: %s", err) + return + } + } +} + +type Client struct { + // Upstream endpoint + URL string + + // Path of database on upstream server. + Path string + + // Underlying HTTP client + HTTPClient *http.Client +} + +func NewClient(rawurl, path string) *Client { + return &Client{ + URL: rawurl, + Path: path, + HTTPClient: http.DefaultClient, + } +} + +func (c *Client) Stream(ctx context.Context) (litestream.StreamReader, error) { + u, err := url.Parse(c.URL) + if err != nil { + return nil, fmt.Errorf("invalid client URL: %w", err) + } else if u.Scheme != "http" && u.Scheme != "https" { + return nil, fmt.Errorf("invalid URL scheme") + } else if u.Host == "" { + return nil, fmt.Errorf("URL host required") + } + + // Strip off everything but the scheme & host. 
+ *u = url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: "/stream", + RawQuery: (url.Values{ + "path": []string{c.Path}, + }).Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := c.HTTPClient.Do(req) + if err != nil { + return nil, err + } else if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("invalid response: code=%d", resp.StatusCode) + } + + return &StreamReader{ + body: resp.Body, + file: io.LimitedReader{R: resp.Body}, + }, nil +} + +type StreamReader struct { + body io.ReadCloser + file io.LimitedReader + err error +} + +func (r *StreamReader) Close() error { + if e := r.body.Close(); e != nil && r.err == nil { + r.err = e + } + return r.err +} + +func (r *StreamReader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } else if r.file.R == nil { + return 0, io.EOF + } + return r.file.Read(p) +} + +func (r *StreamReader) Next() (*litestream.StreamRecordHeader, error) { + if r.err != nil { + return nil, r.err + } + + // If bytes remain on the current file, discard. + if r.file.N > 0 { + if _, r.err = io.Copy(io.Discard, &r.file); r.err != nil { + return nil, r.err + } + } + + // Read record header. + buf := make([]byte, litestream.StreamRecordHeaderSize) + if _, err := io.ReadFull(r.body, buf); err != nil { + r.err = fmt.Errorf("http.StreamReader.Next(): %w", err) + return nil, r.err + } + + var hdr litestream.StreamRecordHeader + if r.err = hdr.UnmarshalBinary(buf); r.err != nil { + return nil, r.err + } + + // Update remaining bytes on file reader. + r.file.N = hdr.Size + + return &hdr, nil +} diff --git a/integration/cmd_test.go b/integration/cmd_test.go index a70ad536..3cab314c 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -391,6 +391,69 @@ LOOP: restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) } +// Ensure a database can be replicated over HTTP. +func TestCmd_Replicate_HTTP(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "http"), t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil { + t.Fatal(err) + } else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil { + t.Fatal(err) + } + + env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir} + env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10001"} + + cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml")) + if err := cmd0.Start(); err != nil { + t.Fatal(err) + } + cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml")) + if err := cmd1.Start(); err != nil { + t.Fatal(err) + } + + db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db0.Close() + + // Execute writes periodically. + for i := 0; i < 100; i++ { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + } + + // Wait for replica to catch up. 
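[Editor's aside: for orientation, here is roughly how the streaming HTTP client introduced above is consumed on the replica side. `DB.stream()` does the real work in this patch, so treat this as a simplified, hedged sketch; the endpoint and database path are placeholders.]

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/benbjohnson/litestream"
	lshttp "github.com/benbjohnson/litestream/http"
)

func main() {
	ctx := context.Background()

	// Placeholder upstream endpoint and upstream database path.
	client := lshttp.NewClient("http://localhost:10001", "/data/db")

	sr, err := client.Stream(ctx)
	if err != nil {
		log.Fatalf("stream connect: %v", err)
	}
	defer sr.Close()

	for {
		hdr, err := sr.Next()
		if err != nil {
			log.Fatalf("next record: %v", err)
		}

		// Each header is followed by hdr.Size payload bytes readable from sr.
		// A real consumer rebuilds the local WAL here; this sketch just logs
		// and drains the payload.
		switch hdr.Type {
		case litestream.StreamRecordTypeSnapshot:
			log.Printf("snapshot @ %s (%d bytes)", hdr.Pos(), hdr.Size)
		case litestream.StreamRecordTypeWALSegment:
			log.Printf("wal segment @ %s (%d bytes)", hdr.Pos(), hdr.Size)
		}
		if _, err := io.Copy(io.Discard, sr); err != nil {
			log.Fatalf("drain payload: %v", err)
		}
	}
}
```

[Each call to `Next()` discards any unread payload from the previous record, so a consumer that only cares about headers could skip the explicit drain.]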
+ time.Sleep(1 * time.Second) + + // Verify count in replica table. + db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db")) + if err != nil { + t.Fatal(err) + } + defer db1.Close() + + var n int + if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil { + t.Fatal(err) + } else if got, want := n, 100; got != want { + t.Fatalf("replica count=%d, want %d", got, want) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd1, stdout1) // kill + killLitestreamCmd(t, cmd0, stdout0) +} + // commandContext returns a "litestream" command with stdout/stderr buffers. func commandContext(ctx context.Context, env []string, arg ...string) (cmd *exec.Cmd, stdout, stderr *internal.LockingBuffer) { cmd = exec.CommandContext(ctx, "litestream", arg...) @@ -428,6 +491,7 @@ func waitForLogMessage(tb testing.TB, b *internal.LockingBuffer, msg string) { // killLitestreamCmd interrupts the process and waits for a clean shutdown. func killLitestreamCmd(tb testing.TB, cmd *exec.Cmd, stdout *internal.LockingBuffer) { + tb.Helper() if err := cmd.Process.Signal(os.Interrupt); err != nil { tb.Fatal("kill litestream: signal:", err) } else if err := cmd.Wait(); err != nil { diff --git a/integration/testdata/replicate/http/litestream.0.yml b/integration/testdata/replicate/http/litestream.0.yml new file mode 100644 index 00000000..e30e651a --- /dev/null +++ b/integration/testdata/replicate/http/litestream.0.yml @@ -0,0 +1,5 @@ +addr: :10001 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/http/litestream.1.yml b/integration/testdata/replicate/http/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/internal/internal.go b/internal/internal.go index 0c70d4d7..c6713790 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -113,6 +113,21 @@ func CreateFile(filename string, mode os.FileMode, uid, gid int) (*os.File, erro return f, nil } +// WriteFile writes data to a named file and sets the mode & uid/gid. +func WriteFile(name string, data []byte, perm os.FileMode, uid, gid int) error { + f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + _ = f.Chown(uid, gid) + + _, err = f.Write(data) + if err1 := f.Close(); err1 != nil && err == nil { + err = err1 + } + return err +} + // MkdirAll is a copy of os.MkdirAll() except that it attempts to set the // mode/uid/gid to match fi for each created directory. func MkdirAll(path string, mode os.FileMode, uid, gid int) error { diff --git a/litestream.go b/litestream.go index d8cb9e84..829e0ffc 100644 --- a/litestream.go +++ b/litestream.go @@ -1,6 +1,7 @@ package litestream import ( + "context" "database/sql" "encoding/binary" "errors" @@ -357,6 +358,9 @@ const ( // WALFrameHeaderSize is the size of the WAL frame header, in bytes. WALFrameHeaderSize = 24 + + // WALIndexHeaderSize is the size of the SHM index header, in bytes. + WALIndexHeaderSize = 136 ) // calcWALSize returns the size of the WAL, in bytes, for a given number of pages. 
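[Editor's aside on the `calcWALSize` helper named in the trailing context line above: its body is not shown in this hunk, but given the constants this file already defines it presumably reduces to the standard SQLite WAL arithmetic, a fixed 32-byte file header followed by one (24-byte frame header + page) per frame. A sketch under that assumption:]

```go
package main

import "fmt"

// Standard SQLite WAL sizes; WALHeaderSize and WALFrameHeaderSize in
// litestream.go carry the same values.
const (
	walHeaderSize      = 32
	walFrameHeaderSize = 24
)

// calcWALSize mirrors what the helper presumably computes: the size of a WAL
// file holding n full-page frames for the given page size.
func calcWALSize(pageSize, n int64) int64 {
	return walHeaderSize + n*(walFrameHeaderSize+pageSize)
}

func main() {
	// e.g. a 4 KB-page database with two committed frames.
	fmt.Println(calcWALSize(4096, 2)) // 8272
}
```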
@@ -462,6 +466,73 @@ func ParseOffset(s string) (int64, error) { return int64(v), nil } +const ( + StreamRecordTypeSnapshot = 1 + StreamRecordTypeWALSegment = 2 +) + +const StreamRecordHeaderSize = 0 + + 4 + 4 + // type, flags + 8 + 8 + 8 + 8 // generation, index, offset, size + +type StreamRecordHeader struct { + Type int + Flags int + Generation string + Index int + Offset int64 + Size int64 +} + +func (hdr *StreamRecordHeader) Pos() Pos { + return Pos{ + Generation: hdr.Generation, + Index: hdr.Index, + Offset: hdr.Offset, + } +} + +func (hdr *StreamRecordHeader) MarshalBinary() ([]byte, error) { + generation, err := strconv.ParseUint(hdr.Generation, 16, 64) + if err != nil { + return nil, fmt.Errorf("invalid generation: %q", generation) + } + + data := make([]byte, StreamRecordHeaderSize) + binary.BigEndian.PutUint32(data[0:4], uint32(hdr.Type)) + binary.BigEndian.PutUint32(data[4:8], uint32(hdr.Flags)) + binary.BigEndian.PutUint64(data[8:16], generation) + binary.BigEndian.PutUint64(data[16:24], uint64(hdr.Index)) + binary.BigEndian.PutUint64(data[24:32], uint64(hdr.Offset)) + binary.BigEndian.PutUint64(data[32:40], uint64(hdr.Size)) + return data, nil +} + +// UnmarshalBinary from data into hdr. +func (hdr *StreamRecordHeader) UnmarshalBinary(data []byte) error { + if len(data) < StreamRecordHeaderSize { + return io.ErrUnexpectedEOF + } + hdr.Type = int(binary.BigEndian.Uint32(data[0:4])) + hdr.Flags = int(binary.BigEndian.Uint32(data[4:8])) + hdr.Generation = fmt.Sprintf("%16x", binary.BigEndian.Uint64(data[8:16])) + hdr.Index = int(binary.BigEndian.Uint64(data[16:24])) + hdr.Offset = int64(binary.BigEndian.Uint64(data[24:32])) + hdr.Size = int64(binary.BigEndian.Uint64(data[32:40])) + return nil +} + +// StreamClient represents a client for streaming changes to a replica DB. +type StreamClient interface { + Stream(ctx context.Context) (StreamReader, error) +} + +// StreamReader represents a reader that streams snapshot and WAL records. +type StreamReader interface { + io.ReadCloser + Next() (*StreamRecordHeader, error) +} + // removeDBFiles deletes the database and related files (journal, shm, wal). func removeDBFiles(filename string) error { if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { diff --git a/testdata/wal-writer/live/README.md b/testdata/wal-writer/live/README.md new file mode 100644 index 00000000..23874310 --- /dev/null +++ b/testdata/wal-writer/live/README.md @@ -0,0 +1,19 @@ +WAL Writer Live +================= + +This test is to ensure we can copy a WAL file into place with a live DB and +trigger a checkpoint into the main DB file. 
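[Editor's aside on the stream record header added above: it is a fixed 40-byte, big-endian structure, two 4-byte fields (type, flags) followed by four 8-byte fields (the generation parsed as a hex integer, then index, offset, size). A self-contained round-trip over that same layout, written independently of the package as an illustration rather than a copy of its implementation:]

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// 40 bytes: type, flags, generation, index, offset, size.
const headerSize = 4 + 4 + 8 + 8 + 8 + 8

type header struct {
	Type, Flags                      uint32
	Generation, Index, Offset, Size  uint64 // generation is the 16-hex-digit name as an integer
}

func marshal(h header) []byte {
	b := make([]byte, headerSize)
	binary.BigEndian.PutUint32(b[0:4], h.Type)
	binary.BigEndian.PutUint32(b[4:8], h.Flags)
	binary.BigEndian.PutUint64(b[8:16], h.Generation)
	binary.BigEndian.PutUint64(b[16:24], h.Index)
	binary.BigEndian.PutUint64(b[24:32], h.Offset)
	binary.BigEndian.PutUint64(b[32:40], h.Size)
	return b
}

func unmarshal(b []byte) header {
	return header{
		Type:       binary.BigEndian.Uint32(b[0:4]),
		Flags:      binary.BigEndian.Uint32(b[4:8]),
		Generation: binary.BigEndian.Uint64(b[8:16]),
		Index:      binary.BigEndian.Uint64(b[16:24]),
		Offset:     binary.BigEndian.Uint64(b[24:32]),
		Size:       binary.BigEndian.Uint64(b[32:40]),
	}
}

func main() {
	in := header{Type: 2, Generation: 0x0123456789abcdef, Index: 3, Offset: 4152, Size: 8240}
	fmt.Println(unmarshal(marshal(in)) == in) // true
}
```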
+ +To reproduce the data files: + +```sh +$ sqlite3 db + +sqlite> PRAGMA journal_mode = 'wal'; +sqlite> CREATE TABLE t (x); +sqlite> PRAGMA wal_checkpoint(TRUNCATE); +sqlite> INSERT INTO t (x) VALUES (1); + +sqlite> CTRL-\ +``` + diff --git a/testdata/wal-writer/live/db b/testdata/wal-writer/live/db new file mode 100644 index 0000000000000000000000000000000000000000..6a6344771e02c6901b01bcd0a1aa19f61dd52a02 GIT binary patch literal 8192 zcmeI#u?hk)494*^h?~^SHKQ(q=mU64w_DVEffG&;P6Wx|)A}s7a=VL@{6Eqrg-*XM zr$c@9&Mx=It#w<=R8(baCL%hV+&&5U9u0G8<^3%x%Z*L)uMl67iy(ji0tg_000Iag zfB*srAb`Nn1uTqbF@}}5=e~2-{V`QdvZmVA$=v48McK{s(oYaT009ILKmY**5I_I{ K1Q0;rj|JZ7;}DMk literal 0 HcmV?d00001 diff --git a/testdata/wal-writer/live/db-shm b/testdata/wal-writer/live/db-shm new file mode 100644 index 0000000000000000000000000000000000000000..1d5fdd8ef23c32be2e8bed7a15c6d929d6296802 GIT binary patch literal 32768 zcmeI)u?fOJ6b9ha&dxG|D`Wt{8C=5JHn@O=WeUv1`{8lN z<8cqZ0WN#zA!a$&B82r=-}~lx*jDXz|LS)4)#+CBmvh-X>qqwIe0)3Q^OMv|XWTCY z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oRWyK>X`P2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs u0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjZ3B`^SZwI=)k literal 0 HcmV?d00001 diff --git a/testdata/wal-writer/live/db-wal b/testdata/wal-writer/live/db-wal new file mode 100644 index 0000000000000000000000000000000000000000..43300fc8bd47245330fc80ee0e036247f3dbe0b1 GIT binary patch literal 4152 zcmXr7XKP~6eI&uaAiw|wj0YZCPUFm-T9%{mV!Z literal 0 HcmV?d00001 diff --git a/testdata/wal-writer/static/README.md b/testdata/wal-writer/static/README.md new file mode 100644 index 00000000..99ffadb8 --- /dev/null +++ b/testdata/wal-writer/static/README.md @@ -0,0 +1,26 @@ +WAL Writer Static +================= + +This test is to ensure that WALWriter will generate the same WAL file as +the `sqlite3` command line. + +To reproduce the data file: + +```sh +$ sqlite3 db + +sqlite> PRAGMA journal_mode = 'wal'; + +sqlite> CREATE TABLE t (x); + +sqlite> INSERT INTO t (x) VALUES (1); + +sqlite> CTRL-\ +``` + +then remove the db & shm files: + +```sh +$ rm db db-shm +``` + diff --git a/testdata/wal-writer/static/db-wal b/testdata/wal-writer/static/db-wal new file mode 100644 index 0000000000000000000000000000000000000000..5cac19ea1b75122d5551b82add4eea119b458620 GIT binary patch literal 12392 zcmeI%ze~eF6u|MjqZULYT|~z>wu6FHaT2T=u+T;Am@5$mu-w%6LIgF;px+S`20jP8eF3F zM(+O`8nv(_)9#55+R7aS0tg_000IagfB*srAbu_4cs(J>mE4!&XA 0; b = b[litestream.WALFrameHeaderSize+4096:] { + pgno := binary.BigEndian.Uint32(b[0:]) + commit := binary.BigEndian.Uint32(b[4:]) + if err := w.WriteFrame(pgno, commit, b[litestream.WALFrameHeaderSize:][:4096]); err != nil { + t.Fatal(err) + } + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Read generated WAL and compare with original. + if buf2, err := os.ReadFile(filepath.Join(tempDir, "db-wal")); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf, buf2) { + t.Fatal("wal file mismatch") + } +} + +func TestWALWriter_Live(t *testing.T) { + testDir := filepath.Join("testdata", "wal-writer", "live") + tempDir := t.TempDir() + + // Copy DB file into temporary dir. + testingutil.CopyFile(t, filepath.Join(testDir, "db"), filepath.Join(tempDir, "db")) + + // Open database. + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + // Verify that table is empty. 
+ var n int + if err := db.QueryRow(`SELECT COUNT(*) FROM t`).Scan(&n); err != nil { + t.Fatal(err) + } else if got, want := n, 0; got != want { + t.Fatalf("init: n=%d, want %d", got, want) + } + + // Copy WAL file into place. + testingutil.CopyFile(t, filepath.Join(testDir, "db-wal"), filepath.Join(tempDir, "db-wal")) + + // Invalidate both copies of the WAL index headers. + f, err := os.OpenFile(filepath.Join(tempDir, "db-shm"), os.O_RDWR, 0666) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + // Read index header. + idx := make([]byte, 136) + if _, err := io.ReadFull(f, idx); err != nil { + t.Fatal(err) + } + + // Invalidate "isInit" flags + idx[12], idx[48+12] = 0, 0 + + // Write header back into index. + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } else if _, err := f.Write(idx); err != nil { + t.Fatal(err) + } + + // Verify that table now has one row. + if err := db.QueryRow(`SELECT COUNT(*) FROM t`).Scan(&n); err != nil { + t.Fatal(err) + } else if got, want := n, 1; got != want { + t.Fatalf("post-wal: n=%d, want %d", got, want) + } +} From 06ea1b13c1940781c17f0729717c6af8172a3608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tobias=20Nie=C3=9Fen?= Date: Sat, 26 Feb 2022 01:09:51 +0100 Subject: [PATCH 61/95] Improve iterator Next() descriptions --- litestream.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/litestream.go b/litestream.go index 829e0ffc..9bb945ec 100644 --- a/litestream.go +++ b/litestream.go @@ -71,9 +71,9 @@ func init() { type SnapshotIterator interface { io.Closer - // Prepares the the next snapshot for reading with the Snapshot() method. + // Prepares the next snapshot for reading with the Snapshot() method. // Returns true if another snapshot is available. Returns false if no more - // snapshots are available or if an error occured. + // snapshots are available or if an error occurred. Next() bool // Returns an error that occurred during iteration. @@ -133,9 +133,9 @@ func (itr *SnapshotInfoSliceIterator) Snapshot() SnapshotInfo { type WALSegmentIterator interface { io.Closer - // Prepares the the next WAL for reading with the WAL() method. + // Prepares the next WAL for reading with the WAL() method. // Returns true if another WAL is available. Returns false if no more - // WAL files are available or if an error occured. + // WAL files are available or if an error occurred. Next() bool // Returns an error that occurred during iteration. 
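[Editor's aside: the commit above only touches the iterator doc comments, but the `Next`/`Err`/`Close` contract they describe deserves a usage sketch. This is a hedged illustration of the intended calling pattern, not code taken from the patch:]

```go
package example

import (
	"context"

	"github.com/benbjohnson/litestream"
)

// listWALSegments drains a WALSegmentIterator following the documented
// contract: keep calling Next() until it returns false, then check Err(),
// and always Close().
func listWALSegments(ctx context.Context, client litestream.ReplicaClient, generation string) ([]litestream.WALSegmentInfo, error) {
	itr, err := client.WALSegments(ctx, generation)
	if err != nil {
		return nil, err
	}
	defer itr.Close()

	var infos []litestream.WALSegmentInfo
	for itr.Next() {
		infos = append(infos, itr.WALSegment())
	}
	if err := itr.Err(); err != nil {
		return nil, err
	}
	return infos, nil
}
```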
From 62e301afd0c14e608c568cea504617aacd9e0049 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 26 Feb 2022 08:45:34 -0700 Subject: [PATCH 62/95] Change dependabot from weekly to monthly --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4ff45e18..826b3679 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,4 +5,4 @@ updates: assignees: - "benbjohnson" schedule: - interval: "weekly" + interval: "monthly" From c435b6b6729a90ba4addbe9c4a9d588ef62bc26d Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 08:35:49 -0700 Subject: [PATCH 63/95] Pass first DB path to child process --- cmd/litestream/replicate.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 284fda7f..ec7de82d 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -156,8 +156,14 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { return fmt.Errorf("cannot parse exec command: %w", err) } + // Pass first database path to child process. + env := os.Environ() + if dbs := c.server.DBs(); len(dbs) > 0 { + env = append(env, fmt.Sprintf("LITESTREAM_DB_PATH=%s", dbs[0].Path())) + } + c.cmd = exec.CommandContext(ctx, execArgs[0], execArgs[1:]...) - c.cmd.Env = os.Environ() + c.cmd.Env = env c.cmd.Stdout = os.Stdout c.cmd.Stderr = os.Stderr if err := c.cmd.Start(); err != nil { From 59de3a01ba97c14b2ea353f4a96e7589f5ef0f82 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 08:47:53 -0700 Subject: [PATCH 64/95] Upgrade mattn/go-sqlite3 to v1.14.12 --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 32134ea7..3f5b3d90 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.53 github.com/mattn/go-shellwords v1.0.12 - github.com/mattn/go-sqlite3 v1.14.11 + github.com/mattn/go-sqlite3 v1.14.12 github.com/pierrec/lz4/v4 v4.1.14 github.com/pkg/sftp v1.13.4 github.com/prometheus/client_golang v1.12.1 diff --git a/go.sum b/go.sum index 37990216..895540f1 100644 --- a/go.sum +++ b/go.sum @@ -238,6 +238,8 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ= github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= +github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= From 14026421b2c1be28a0f5cff8e563991e9e32e58d Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 08:48:47 -0700 Subject: [PATCH 65/95] Disable dependabot --- .github/dependabot.yml | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 
826b3679..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "gomod" - directory: "/" - assignees: - - "benbjohnson" - schedule: - interval: "monthly" From 7fe79d38836809956cdfc6a23629804abe0df663 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 09:46:23 -0700 Subject: [PATCH 66/95] Add -addr flag to replicate command --- cmd/litestream/replicate.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index ec7de82d..80b73594 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -51,6 +51,7 @@ func NewReplicateCommand(stdin io.Reader, stdout, stderr io.Writer) *ReplicateCo func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) execFlag := fs.String("exec", "", "execute subcommand") + addr := fs.String("addr", "", "HTTP bind address (host:port)") registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -83,7 +84,10 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e } } - // Override config exec command, if specified. + // Override config with flags, if specified. + if *addr != "" { + c.Config.Addr = *addr + } if *execFlag != "" { c.Config.Exec = *execFlag } @@ -147,6 +151,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { if err := c.httpServer.Open(); err != nil { return fmt.Errorf("cannot start http server: %w", err) } + log.Printf("http server running at %s", c.httpServer.URL()) } // Parse exec commands args & start subprocess. From 8ee5fcb591d70e0362714ab601de210950ce5e46 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 10:59:28 -0700 Subject: [PATCH 67/95] Read config file from present working directory, if present --- cmd/litestream/main.go | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 65eb6af0..1850b469 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -217,13 +217,34 @@ func (c *Config) DBConfig(path string) *DBConfig { // ReadConfigFile unmarshals config from filename. Expands path if needed. // If expandEnv is true then environment variables are expanded in the config. // If filename is blank then the default config path is used. -func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { - config := DefaultConfig() +func ReadConfigFile(filename string, expandEnv bool) (config Config, err error) { + var filenames []string + if filename != "" { + filenames = append(filenames, filename) + } + filenames = append(filenames, "./litestream.yml") + filenames = append(filenames, DefaultConfigPath()) - useDefaultPath := filename == "" - if useDefaultPath { - filename = DefaultConfigPath() + for _, name := range filenames { + isDefaultPath := name != filename + + if config, err = readConfigFile(name, expandEnv); os.IsNotExist(err) { + if isDefaultPath { + continue + } + return config, fmt.Errorf("config file not found: %s", filename) + } else if err != nil { + return config, err + } + break } + return config, nil +} + +// readConfigFile unmarshals config from filename. Expands path if needed. +// If expandEnv is true then environment variables are expanded in the config. 
+func readConfigFile(filename string, expandEnv bool) (_ Config, err error) { + config := DefaultConfig() // Expand filename, if necessary. filename, err = expand(filename) @@ -234,12 +255,7 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { // Read configuration. // Do not return an error if using default path and file is missing. buf, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - if useDefaultPath { - return config, nil - } - return config, fmt.Errorf("config file not found: %s", filename) - } else if err != nil { + if err != nil { return config, err } From 07d220028ab0246a6ac93f86ed75dffede63bfe5 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 11:12:29 -0700 Subject: [PATCH 68/95] Rename 'gcs' to 'gs' for consistency --- .github/workflows/integration_test.yml | 4 ++-- cmd/litestream/main.go | 18 +++++++++--------- cmd/litestream/main_test.go | 8 ++++---- cmd/litestream/replicate.go | 4 ++-- {gcs => gs}/replica_client.go | 14 +++++++------- integration/replica_client_test.go | 20 ++++++++++---------- sftp/replica_client.go | 4 ++-- 7 files changed, 36 insertions(+), 36 deletions(-) rename {gcs => gs}/replica_client.go (97%) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index e1b21192..7a97545f 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -52,10 +52,10 @@ jobs: env: GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - run: go test -v -run=TestReplicaClient ./integration -replica-type gcs + - run: go test -v -run=TestReplicaClient ./integration -replica-type gs env: GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: integration.litestream.io + LITESTREAM_GS_BUCKET: integration.litestream.io abs-integration-test: name: Run Azure Blob Store Integration Tests diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 1850b469..bc75bf8a 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -22,7 +22,7 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/http" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -396,8 +396,8 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re if client, err = newS3ReplicaClientFromConfig(c); err != nil { return nil, err } - case "gcs": - if client, err = newGCSReplicaClientFromConfig(c); err != nil { + case "gs": + if client, err = newGSReplicaClientFromConfig(c); err != nil { return nil, err } case "abs": @@ -525,13 +525,13 @@ func newS3ReplicaClientFromConfig(c *ReplicaConfig) (_ *s3.ReplicaClient, err er return client, nil } -// newGCSReplicaClientFromConfig returns a new instance of gcs.ReplicaClient built from config. -func newGCSReplicaClientFromConfig(c *ReplicaConfig) (_ *gcs.ReplicaClient, err error) { +// newGSReplicaClientFromConfig returns a new instance of gs.ReplicaClient built from config. +func newGSReplicaClientFromConfig(c *ReplicaConfig) (_ *gs.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. 
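[Editor's aside: the reworked `ReadConfigFile` above amounts to a short candidate-path search, an explicit `-config` path if given, then `./litestream.yml` in the working directory, then the platform default path, where only an explicitly requested file is required to exist. Restated as a standalone helper; the function name and signature are illustrative, the real code builds the same list inline.]

```go
package example

import "os"

// resolveConfigPath returns the first existing config file in the lookup
// order described above. A missing explicit path is an error; missing
// fallback paths are skipped, and an empty result means "keep the built-in
// defaults".
func resolveConfigPath(explicit, defaultPath string) (string, error) {
	candidates := []string{"./litestream.yml", defaultPath}
	if explicit != "" {
		candidates = append([]string{explicit}, candidates...)
	}

	for _, name := range candidates {
		if _, err := os.Stat(name); os.IsNotExist(err) {
			if name == explicit {
				return "", err // an explicit path must exist
			}
			continue // fallback paths may be absent
		} else if err != nil {
			return "", err
		}
		return name, nil
	}
	return "", nil // nothing found: caller keeps built-in defaults
}
```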
if c.URL != "" && c.Path != "" { - return nil, fmt.Errorf("cannot specify url & path for gcs replica") + return nil, fmt.Errorf("cannot specify url & path for gs replica") } else if c.URL != "" && c.Bucket != "" { - return nil, fmt.Errorf("cannot specify url & bucket for gcs replica") + return nil, fmt.Errorf("cannot specify url & bucket for gs replica") } bucket, path := c.Bucket, c.Path @@ -554,11 +554,11 @@ func newGCSReplicaClientFromConfig(c *ReplicaConfig) (_ *gcs.ReplicaClient, err // Ensure required settings are set. if bucket == "" { - return nil, fmt.Errorf("bucket required for gcs replica") + return nil, fmt.Errorf("bucket required for gs replica") } // Build replica. - client := gcs.NewReplicaClient() + client := gs.NewReplicaClient() client.Bucket = bucket client.Path = path return client, nil diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index d3d0af8c..f37ff467 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -11,7 +11,7 @@ import ( "github.com/benbjohnson/litestream" main "github.com/benbjohnson/litestream/cmd/litestream" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/s3" ) @@ -170,11 +170,11 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { }) } -func TestNewGCSReplicaFromConfig(t *testing.T) { - r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gcs://foo/bar"}, nil) +func TestNewGSReplicaFromConfig(t *testing.T) { + r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gs://foo/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client().(*gcs.ReplicaClient); !ok { + } else if client, ok := r.Client().(*gs.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 80b73594..708ca8b8 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -11,7 +11,7 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/http" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -133,7 +133,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path()) case *s3.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Region, client.Endpoint, r.SyncInterval) - case *gcs.ReplicaClient: + case *gs.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, r.SyncInterval) case *abs.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Endpoint, r.SyncInterval) diff --git a/gcs/replica_client.go b/gs/replica_client.go similarity index 97% rename from gcs/replica_client.go rename to gs/replica_client.go index 7f5d91c3..efd7bc54 100644 --- a/gcs/replica_client.go +++ b/gs/replica_client.go @@ -1,4 +1,4 @@ -package gcs +package gs import ( "context" @@ -17,17 +17,17 @@ import ( ) // ReplicaClientType is the client type for this package. 
-const ReplicaClientType = "gcs" +const ReplicaClientType = "gs" var _ litestream.ReplicaClient = (*ReplicaClient)(nil) // ReplicaClient is a client for writing snapshots & WAL segments to disk. type ReplicaClient struct { mu sync.Mutex - client *storage.Client // gcs client - bkt *storage.BucketHandle // gcs bucket handle + client *storage.Client // gs client + bkt *storage.BucketHandle // gs bucket handle - // GCS bucket information + // GS bucket information Bucket string Path string } @@ -37,12 +37,12 @@ func NewReplicaClient() *ReplicaClient { return &ReplicaClient{} } -// Type returns "gcs" as the client type. +// Type returns "gs" as the client type. func (c *ReplicaClient) Type() string { return ReplicaClientType } -// Init initializes the connection to GCS. No-op if already initialized. +// Init initializes the connection to GS. No-op if already initialized. func (c *ReplicaClient) Init(ctx context.Context) (err error) { c.mu.Lock() defer c.mu.Unlock() diff --git a/integration/replica_client_test.go b/integration/replica_client_test.go index 8761965e..b76034b8 100644 --- a/integration/replica_client_test.go +++ b/integration/replica_client_test.go @@ -16,7 +16,7 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" ) @@ -45,8 +45,8 @@ var ( // Google cloud storage settings var ( - gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") - gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "") + gsBucket = flag.String("gs-bucket", os.Getenv("LITESTREAM_GS_BUCKET"), "") + gsPath = flag.String("gs-path", os.Getenv("LITESTREAM_GS_PATH"), "") ) // Azure blob storage settings @@ -480,8 +480,8 @@ func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { return litestream.NewFileReplicaClient(tb.TempDir()) case s3.ReplicaClientType: return NewS3ReplicaClient(tb) - case gcs.ReplicaClientType: - return NewGCSReplicaClient(tb) + case gs.ReplicaClientType: + return NewGSReplicaClient(tb) case abs.ReplicaClientType: return NewABSReplicaClient(tb) case sftp.ReplicaClientType: @@ -508,13 +508,13 @@ func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { return c } -// NewGCSReplicaClient returns a new client for integration testing. -func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { +// NewGSReplicaClient returns a new client for integration testing. +func NewGSReplicaClient(tb testing.TB) *gs.ReplicaClient { tb.Helper() - c := gcs.NewReplicaClient() - c.Bucket = *gcsBucket - c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) + c := gs.NewReplicaClient() + c.Bucket = *gsBucket + c.Path = path.Join(*gsPath, fmt.Sprintf("%016x", rand.Uint64())) return c } diff --git a/sftp/replica_client.go b/sftp/replica_client.go index fd0912cd..8566ab01 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -52,12 +52,12 @@ func NewReplicaClient() *ReplicaClient { } } -// Type returns "gcs" as the client type. +// Type returns "sftp" as the client type. func (c *ReplicaClient) Type() string { return ReplicaClientType } -// Init initializes the connection to GCS. No-op if already initialized. +// Init initializes the connection to SFTP. No-op if already initialized. 
func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) { c.mu.Lock() defer c.mu.Unlock() From d5792c42b97cf57c20a0be857319d486c170177b Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 5 Mar 2022 11:28:31 -0700 Subject: [PATCH 69/95] Prevent double-close for SFTP client --- internal/internal.go | 16 ++++++++++++++++ internal/internal_test.go | 25 +++++++++++++++++++++++++ sftp/replica_client.go | 10 ++++++---- 3 files changed, 47 insertions(+), 4 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index c6713790..95d0f789 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -8,6 +8,7 @@ import ( "os" "regexp" "strconv" + "sync" "syscall" "time" @@ -264,3 +265,18 @@ func TruncateDuration(d time.Duration) time.Duration { func MD5Hash(b []byte) string { return fmt.Sprintf("%x", md5.Sum(b)) } + +// OnceCloser returns a closer that will only ignore duplicate closes. +func OnceCloser(c io.Closer) io.Closer { + return &onceCloser{Closer: c} +} + +type onceCloser struct { + sync.Once + io.Closer +} + +func (c *onceCloser) Close() (err error) { + c.Once.Do(func() { err = c.Closer.Close() }) + return err +} diff --git a/internal/internal_test.go b/internal/internal_test.go index 9d2c49b2..11d0f6d9 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/benbjohnson/litestream/internal" + "github.com/benbjohnson/litestream/mock" ) func TestParseSnapshotPath(t *testing.T) { @@ -100,3 +101,27 @@ func TestTruncateDuration(t *testing.T) { }) } } + +func TestOnceCloser(t *testing.T) { + var closed bool + var rc = &mock.ReadCloser{ + CloseFunc: func() error { + if closed { + t.Fatal("already closed") + } + closed = true + return nil + }, + } + + oc := internal.OnceCloser(rc) + if err := oc.Close(); err != nil { + t.Fatalf("first close: %s", err) + } else if err := oc.Close(); err != nil { + t.Fatalf("second close: %s", err) + } + + if !closed { + t.Fatal("expected close") + } +} diff --git a/sftp/replica_client.go b/sftp/replica_client.go index 8566ab01..269e8ca1 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -270,12 +270,13 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in if err != nil { return info, fmt.Errorf("cannot open snapshot file for writing: %w", err) } - defer f.Close() + closer := internal.OnceCloser(f) + defer closer.Close() n, err := io.Copy(f, rd) if err != nil { return info, err - } else if err := f.Close(); err != nil { + } else if err := closer.Close(); err != nil { return info, err } @@ -391,12 +392,13 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, if err != nil { return info, fmt.Errorf("cannot open snapshot file for writing: %w", err) } - defer f.Close() + closer := internal.OnceCloser(f) + defer closer.Close() n, err := io.Copy(f, rd) if err != nil { return info, err - } else if err := f.Close(); err != nil { + } else if err := closer.Close(); err != nil { return info, err } From 00bad4308d147569542670eef4b70193f547a5cc Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 6 Mar 2022 08:22:51 -0700 Subject: [PATCH 70/95] Set permission on file replica client on init --- db.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/db.go b/db.go index 06957e32..a2396ec5 100644 --- a/db.go +++ b/db.go @@ -585,6 +585,16 @@ func (db *DB) init() (err error) { db.fileMode = fi.Mode() db.uid, db.gid = internal.Fileinfo(fi) + // Pass permissions to file replicas, if they exist. 
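[Editor's aside, looping back to the `OnceCloser` helper introduced a little above: it exists for the write-then-close-and-check idiom, where an explicit `Close` whose error is checked coexists with a deferred `Close` for early returns. A small hedged sketch of that idiom outside the SFTP client; the destination path is illustrative.]

```go
package example

import (
	"io"
	"os"

	"github.com/benbjohnson/litestream/internal"
)

// writeAll copies rd to a file, checking the error from Close() explicitly
// while still keeping a deferred Close for early returns. OnceCloser makes
// the second Close a harmless no-op.
func writeAll(path string, rd io.Reader) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	closer := internal.OnceCloser(f)
	defer closer.Close()

	if _, err := io.Copy(f, rd); err != nil {
		return err
	}
	return closer.Close() // reports close errors; the deferred call becomes a no-op
}
```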
+ for _, r := range db.Replicas { + if client, ok := r.Client().(*FileReplicaClient); ok { + client.FileMode = db.fileMode + client.DirMode = db.dirMode + client.Uid = db.uid + client.Gid = db.gid + } + } + // Start a long-running read transaction to prevent other transactions // from checkpointing. if err := db.acquireReadLock(); err != nil { From 8d10881278fdfda9cc11fd6cff840b3e7e460a43 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 2 Apr 2022 11:43:49 -0600 Subject: [PATCH 71/95] Use database page size in read replication --- db.go | 2 - http/client.go | 135 ++++++++++++++++++++++++++++++++++++ http/{http.go => server.go} | 117 ++----------------------------- litestream.go | 1 + 4 files changed, 140 insertions(+), 115 deletions(-) create mode 100644 http/client.go rename http/{http.go => server.go} (71%) diff --git a/db.go b/db.go index a2396ec5..c1ac7dad 100644 --- a/db.go +++ b/db.go @@ -677,8 +677,6 @@ func (db *DB) initReplica(pageSize int) (err error) { return fmt.Errorf("enable wal failed, mode=%q", mode) } - // TODO: Set page size. - if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream (id INTEGER)`); err != nil { return fmt.Errorf("create _litestream table: %w", err) } else if _, err := db.db.ExecContext(db.ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { diff --git a/http/client.go b/http/client.go new file mode 100644 index 00000000..5c5ae759 --- /dev/null +++ b/http/client.go @@ -0,0 +1,135 @@ +package http + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/benbjohnson/litestream" +) + +// Client represents an client for a streaming Litestream HTTP server. +type Client struct { + // Upstream endpoint + URL string + + // Path of database on upstream server. + Path string + + // Underlying HTTP client + HTTPClient *http.Client +} + +// NewClient returns an instance of Client. +func NewClient(rawurl, path string) *Client { + return &Client{ + URL: rawurl, + Path: path, + HTTPClient: http.DefaultClient, + } +} + +// Stream returns a snapshot and continuous stream of WAL updates. +func (c *Client) Stream(ctx context.Context) (litestream.StreamReader, error) { + u, err := url.Parse(c.URL) + if err != nil { + return nil, fmt.Errorf("invalid client URL: %w", err) + } else if u.Scheme != "http" && u.Scheme != "https" { + return nil, fmt.Errorf("invalid URL scheme") + } else if u.Host == "" { + return nil, fmt.Errorf("URL host required") + } + + // Strip off everything but the scheme & host. + *u = url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: "/stream", + RawQuery: (url.Values{ + "path": []string{c.Path}, + }).Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := c.HTTPClient.Do(req) + if err != nil { + return nil, err + } else if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("invalid response: code=%d", resp.StatusCode) + } + + pageSize, _ := strconv.Atoi(resp.Header.Get("Litestream-page-size")) + if pageSize <= 0 { + resp.Body.Close() + return nil, fmt.Errorf("stream page size unavailable") + } + + return &StreamReader{ + pageSize: pageSize, + rc: resp.Body, + lr: io.LimitedReader{R: resp.Body}, + }, nil +} + +// StreamReader represents an optional snapshot followed by a continuous stream +// of WAL updates. It is used to implement live read replication from a single +// primary Litestream server to one or more remote Litestream replicas. 
+type StreamReader struct { + pageSize int + rc io.ReadCloser + lr io.LimitedReader +} + +// Close closes the underlying reader. +func (r *StreamReader) Close() (err error) { + if e := r.rc.Close(); err == nil { + err = e + } + return err +} + +// PageSize returns the page size on the remote database. +func (r *StreamReader) PageSize() int { return r.pageSize } + +// Read reads bytes of the current payload into p. Only valid after a successful +// call to Next(). On io.EOF, call Next() again to begin reading next record. +func (r *StreamReader) Read(p []byte) (n int, err error) { + return r.lr.Read(p) +} + +// Next returns the next available record. This call will block until a record +// is available. After calling Next(), read the payload from the reader using +// Read() until io.EOF is reached. +func (r *StreamReader) Next() (*litestream.StreamRecordHeader, error) { + // If bytes remain on the current file, discard. + if r.lr.N > 0 { + if _, err := io.Copy(io.Discard, &r.lr); err != nil { + return nil, err + } + } + + // Read record header. + buf := make([]byte, litestream.StreamRecordHeaderSize) + if _, err := io.ReadFull(r.rc, buf); err != nil { + return nil, fmt.Errorf("http.StreamReader.Next(): %w", err) + } + + var hdr litestream.StreamRecordHeader + if err := hdr.UnmarshalBinary(buf); err != nil { + return nil, err + } + + // Update remaining bytes on file reader. + r.lr.N = hdr.Size + + return &hdr, nil +} diff --git a/http/http.go b/http/server.go similarity index 71% rename from http/http.go rename to http/server.go index fc35f0fe..9763c73c 100644 --- a/http/http.go +++ b/http/server.go @@ -1,15 +1,14 @@ package http import ( - "context" "fmt" "io" "log" "net" "net/http" httppprof "net/http/pprof" - "net/url" "os" + "strconv" "strings" "github.com/benbjohnson/litestream" @@ -142,6 +141,9 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { return } + // Set the page size in the header. + w.Header().Set("Litestream-page-size", strconv.Itoa(db.PageSize())) + // TODO: Restart stream from a previous position, if specified. // Determine starting position. @@ -260,114 +262,3 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { } } } - -type Client struct { - // Upstream endpoint - URL string - - // Path of database on upstream server. - Path string - - // Underlying HTTP client - HTTPClient *http.Client -} - -func NewClient(rawurl, path string) *Client { - return &Client{ - URL: rawurl, - Path: path, - HTTPClient: http.DefaultClient, - } -} - -func (c *Client) Stream(ctx context.Context) (litestream.StreamReader, error) { - u, err := url.Parse(c.URL) - if err != nil { - return nil, fmt.Errorf("invalid client URL: %w", err) - } else if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("invalid URL scheme") - } else if u.Host == "" { - return nil, fmt.Errorf("URL host required") - } - - // Strip off everything but the scheme & host. 
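As a usage sketch (not part of the patch): one way a downstream process might consume this client, assuming the Stream signature as of this patch (a later patch adds a resume-position argument) and the StreamReader methods above. The endpoint and database path are placeholders, and any non-snapshot record is treated here as a WAL segment:

    package example

    import (
        "context"
        "fmt"
        "io"

        "github.com/benbjohnson/litestream"
        lshttp "github.com/benbjohnson/litestream/http"
    )

    // drainStream connects to a primary Litestream server and logs each record.
    func drainStream(ctx context.Context) error {
        client := lshttp.NewClient("http://primary:9090", "/var/lib/app/db")

        sr, err := client.Stream(ctx)
        if err != nil {
            return err
        }
        defer sr.Close()

        fmt.Println("remote page size:", sr.PageSize())

        for {
            hdr, err := sr.Next() // blocks until the next record header arrives
            if err != nil {
                return err
            }

            // Read this record's payload from sr until io.EOF.
            n, err := io.Copy(io.Discard, sr)
            if err != nil {
                return err
            }

            if hdr.Type == litestream.StreamRecordTypeSnapshot {
                fmt.Printf("snapshot: generation=%s index=%d size=%d\n", hdr.Generation, hdr.Index, n)
            } else {
                fmt.Printf("wal segment: generation=%s index=%d offset=%d size=%d\n", hdr.Generation, hdr.Index, hdr.Offset, n)
            }
        }
    }
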
- *u = url.URL{ - Scheme: u.Scheme, - Host: u.Host, - Path: "/stream", - RawQuery: (url.Values{ - "path": []string{c.Path}, - }).Encode(), - } - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - resp, err := c.HTTPClient.Do(req) - if err != nil { - return nil, err - } else if resp.StatusCode != http.StatusOK { - resp.Body.Close() - return nil, fmt.Errorf("invalid response: code=%d", resp.StatusCode) - } - - return &StreamReader{ - body: resp.Body, - file: io.LimitedReader{R: resp.Body}, - }, nil -} - -type StreamReader struct { - body io.ReadCloser - file io.LimitedReader - err error -} - -func (r *StreamReader) Close() error { - if e := r.body.Close(); e != nil && r.err == nil { - r.err = e - } - return r.err -} - -func (r *StreamReader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } else if r.file.R == nil { - return 0, io.EOF - } - return r.file.Read(p) -} - -func (r *StreamReader) Next() (*litestream.StreamRecordHeader, error) { - if r.err != nil { - return nil, r.err - } - - // If bytes remain on the current file, discard. - if r.file.N > 0 { - if _, r.err = io.Copy(io.Discard, &r.file); r.err != nil { - return nil, r.err - } - } - - // Read record header. - buf := make([]byte, litestream.StreamRecordHeaderSize) - if _, err := io.ReadFull(r.body, buf); err != nil { - r.err = fmt.Errorf("http.StreamReader.Next(): %w", err) - return nil, r.err - } - - var hdr litestream.StreamRecordHeader - if r.err = hdr.UnmarshalBinary(buf); r.err != nil { - return nil, r.err - } - - // Update remaining bytes on file reader. - r.file.N = hdr.Size - - return &hdr, nil -} diff --git a/litestream.go b/litestream.go index 9bb945ec..a367b33a 100644 --- a/litestream.go +++ b/litestream.go @@ -530,6 +530,7 @@ type StreamClient interface { // StreamReader represents a reader that streams snapshot and WAL records. 
type StreamReader interface { io.ReadCloser + PageSize() int Next() (*StreamRecordHeader, error) } From 6aba41665654f264dc372395271294c8f5e6ff68 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 2 Apr 2022 11:53:22 -0600 Subject: [PATCH 72/95] Remove CI task for executing long running test runner on each build --- .github/workflows/release.linux.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index d45c111e..86541990 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -126,9 +126,3 @@ jobs: asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_content_type: application/octet-stream - - - name: Dispatch test runner - if: matrix.deploy_test_runner && github.actor != 'dependabot' - run: sleep 60 && gh workflow run deploy.yml -R benbjohnson/litestream-test-runner -f run_id=${{ github.run_id }} -f litestream_version=${{ github.sha }} - env: - GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} From 46888530b2321280cd7e6a240a18bdbfb96da330 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 3 Apr 2022 09:09:50 -0600 Subject: [PATCH 73/95] Default upstream path if not specified --- cmd/litestream/main.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index bc75bf8a..d6d145e8 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -309,10 +309,12 @@ func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) // Attach upstream HTTP client if specified. if upstreamURL := dbc.Upstream.URL; upstreamURL != "" { - if dbc.Upstream.Path == "" { - return nil, fmt.Errorf("upstream path required") + // Use local database path if upstream path is not specified. + upstreamPath := dbc.Upstream.Path + if upstreamPath == "" { + upstreamPath = db.Path() } - db.StreamClient = http.NewClient(upstreamURL, dbc.Upstream.Path) + db.StreamClient = http.NewClient(upstreamURL, upstreamPath) } // Override default database settings if specified in configuration. From 2c3e28c78678d633b905c648851ee446c6e63ca1 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 3 Apr 2022 11:46:45 -0600 Subject: [PATCH 74/95] Improve http error logging --- http/server.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/http/server.go b/http/server.go index 9763c73c..dc226567 100644 --- a/http/server.go +++ b/http/server.go @@ -119,7 +119,7 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { case http.MethodGet: s.handleGetStream(w, r) default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + s.writeError(w, r, "Method not allowed", http.StatusMethodNotAllowed) } default: http.NotFound(w, r) @@ -132,12 +132,12 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // TODO: Listen for all databases matching query criteria. 
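As a programmatic sketch (not part of the patch) of the same wiring the config loader performs above, assuming only litestream.NewDB, the DB.StreamClient field, and http.NewClient from these patches; paths and URL are placeholders:

    package example

    import (
        "github.com/benbjohnson/litestream"
        lshttp "github.com/benbjohnson/litestream/http"
    )

    // newReplicaDB builds a DB that follows an upstream Litestream server.
    // When the upstream path is left empty, the local path is reused, which
    // mirrors the default now applied by the config loader.
    func newReplicaDB(localPath, upstreamURL, upstreamPath string) *litestream.DB {
        db := litestream.NewDB(localPath)
        if upstreamPath == "" {
            upstreamPath = db.Path()
        }
        db.StreamClient = lshttp.NewClient(upstreamURL, upstreamPath)
        return db
    }
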
path := q.Get("path") if path == "" { - http.Error(w, "Database name required", http.StatusBadRequest) + s.writeError(w, r, "Database name required", http.StatusBadRequest) return } db := s.server.DB(path) if db == nil { - http.Error(w, "Database not found", http.StatusNotFound) + s.writeError(w, r, "Database not found", http.StatusNotFound) return } @@ -149,7 +149,7 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // Determine starting position. pos := db.Pos() if pos.Generation == "" { - http.Error(w, "No generation available", http.StatusServiceUnavailable) + s.writeError(w, r, "No generation available", http.StatusServiceUnavailable) return } pos.Offset = 0 @@ -160,7 +160,7 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // Obtain iterator before snapshot so we don't miss any WAL segments. itr, err := db.WALSegments(r.Context(), pos.Generation) if err != nil { - http.Error(w, fmt.Sprintf("Cannot obtain WAL iterator: %s", err), http.StatusInternalServerError) + s.writeError(w, r, fmt.Sprintf("Cannot obtain WAL iterator: %s", err), http.StatusInternalServerError) return } defer itr.Close() @@ -191,7 +191,7 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { return nil }); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + s.writeError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -262,3 +262,8 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { } } } + +func (s *Server) writeError(w http.ResponseWriter, r *http.Request, err string, code int) { + s.Logger.Printf("error: %s", err) + http.Error(w, err, code) +} From 44662022fa3a02541fd1589d3440676741333cfb Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sun, 3 Apr 2022 09:18:54 -0600 Subject: [PATCH 75/95] Allow read replication recovery from last position --- db.go | 31 ++++- http/client.go | 19 +-- http/server.go | 115 +++++++++++------- integration/cmd_test.go | 92 ++++++++++++++ .../replicate/http-recovery/litestream.0.yml | 5 + .../replicate/http-recovery/litestream.1.yml | 5 + litestream.go | 74 ++++++++++- litestream_test.go | 72 +++++++++++ 8 files changed, 359 insertions(+), 54 deletions(-) create mode 100644 integration/testdata/replicate/http-recovery/litestream.0.yml create mode 100644 integration/testdata/replicate/http-recovery/litestream.1.yml diff --git a/db.go b/db.go index c1ac7dad..b244698a 100644 --- a/db.go +++ b/db.go @@ -914,6 +914,11 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { // Sync copies pending data from the WAL to the shadow WAL. func (db *DB) Sync(ctx context.Context) error { + if db.StreamClient != nil { + db.Logger.Printf("using upstream client, skipping sync") + return nil + } + const retryN = 5 for i := 0; i < retryN; i++ { @@ -1417,6 +1422,20 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error return nil } +// readPositionFile reads the position from the position file. +func (db *DB) readPositionFile() (Pos, error) { + buf, err := os.ReadFile(db.PositionPath()) + if os.IsNotExist(err) { + return Pos{}, nil + } else if err != nil { + return Pos{}, err + } + + // Treat invalid format as a non-existent file so we return an empty position. + pos, _ := ParsePos(strings.TrimSpace(string(buf))) + return pos, nil +} + // writePositionFile writes pos as the current position. 
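As an illustration (not part of the patch): the position file holds a single line in the generation/index:offset form produced by Pos.String(), which is exactly what ParsePos (added later in this patch) reads back. A small round-trip sketch, reusing the generation value from the test data:

    package example

    import (
        "fmt"

        "github.com/benbjohnson/litestream"
    )

    // roundTripPos shows the single-line format stored in the position file,
    // e.g. "29cf4bced74e92ab/00000000000003e8:00000000000007d0", and that it
    // parses back into the same Pos value.
    func roundTripPos() error {
        pos := litestream.Pos{Generation: "29cf4bced74e92ab", Index: 1000, Offset: 2000}

        parsed, err := litestream.ParsePos(pos.String())
        if err != nil {
            return err
        }
        if parsed != pos {
            return fmt.Errorf("round trip mismatch: %s != %s", parsed, pos)
        }
        return nil
    }
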
func (db *DB) writePositionFile(pos Pos) error { return internal.WriteFile(db.PositionPath(), []byte(pos.String()+"\n"), db.fileMode, db.uid, db.gid) @@ -1675,18 +1694,20 @@ func (db *DB) monitorUpstream(ctx context.Context) error { // stream initializes the local database and continuously streams new upstream data. func (db *DB) stream(ctx context.Context) error { + pos, err := db.readPositionFile() + if err != nil { + return fmt.Errorf("read position file: %w", err) + } + // Continuously stream and apply records from client. - sr, err := db.StreamClient.Stream(ctx) + sr, err := db.StreamClient.Stream(ctx, pos) if err != nil { return fmt.Errorf("stream connect: %w", err) } defer sr.Close() - // TODO: Determine page size of upstream database before creating local. - const pageSize = 4096 - // Initialize the database and create it if it doesn't exist. - if err := db.initReplica(pageSize); err != nil { + if err := db.initReplica(sr.PageSize()); err != nil { return fmt.Errorf("init replica: %w", err) } diff --git a/http/client.go b/http/client.go index 5c5ae759..2975ec6b 100644 --- a/http/client.go +++ b/http/client.go @@ -33,7 +33,7 @@ func NewClient(rawurl, path string) *Client { } // Stream returns a snapshot and continuous stream of WAL updates. -func (c *Client) Stream(ctx context.Context) (litestream.StreamReader, error) { +func (c *Client) Stream(ctx context.Context, pos litestream.Pos) (litestream.StreamReader, error) { u, err := url.Parse(c.URL) if err != nil { return nil, fmt.Errorf("invalid client URL: %w", err) @@ -43,14 +43,19 @@ func (c *Client) Stream(ctx context.Context) (litestream.StreamReader, error) { return nil, fmt.Errorf("URL host required") } + // Add path & position to query path. + q := url.Values{"path": []string{c.Path}} + if !pos.IsZero() { + q.Set("generation", pos.Generation) + q.Set("index", litestream.FormatIndex(pos.Index)) + } + // Strip off everything but the scheme & host. *u = url.URL{ - Scheme: u.Scheme, - Host: u.Host, - Path: "/stream", - RawQuery: (url.Values{ - "path": []string{c.Path}, - }).Encode(), + Scheme: u.Scheme, + Host: u.Host, + Path: "/stream", + RawQuery: q.Encode(), } req, err := http.NewRequest("GET", u.String(), nil) diff --git a/http/server.go b/http/server.go index dc226567..dbc0a617 100644 --- a/http/server.go +++ b/http/server.go @@ -128,13 +128,25 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { q := r.URL.Query() - - // TODO: Listen for all databases matching query criteria. path := q.Get("path") if path == "" { s.writeError(w, r, "Database name required", http.StatusBadRequest) return } + + // Parse current client position, if available. + var pos litestream.Pos + if generation, index := q.Get("generation"), q.Get("index"); generation != "" && index != "" { + pos.Generation = generation + + var err error + if pos.Index, err = litestream.ParseIndex(index); err != nil { + s.writeError(w, r, "Invalid index query parameter", http.StatusBadRequest) + return + } + } + + // Fetch database instance from the primary server. db := s.server.DB(path) if db == nil { s.writeError(w, r, "Database not found", http.StatusNotFound) @@ -144,70 +156,91 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // Set the page size in the header. w.Header().Set("Litestream-page-size", strconv.Itoa(db.PageSize())) - // TODO: Restart stream from a previous position, if specified. - // Determine starting position. 
- pos := db.Pos() - if pos.Generation == "" { + dbPos := db.Pos() + if dbPos.Generation == "" { s.writeError(w, r, "No generation available", http.StatusServiceUnavailable) return } - pos.Offset = 0 + dbPos.Offset = 0 - s.Logger.Printf("stream connected @ %s", pos) - defer s.Logger.Printf("stream disconnected") + // Use database position if generation has changed. + var snapshotRequired bool + if pos.Generation != dbPos.Generation { + s.Logger.Printf("stream generation mismatch, using primary position: client.pos=%s", pos) + pos, snapshotRequired = dbPos, true + } // Obtain iterator before snapshot so we don't miss any WAL segments. - itr, err := db.WALSegments(r.Context(), pos.Generation) + fitr, err := db.WALSegments(r.Context(), pos.Generation) if err != nil { s.writeError(w, r, fmt.Sprintf("Cannot obtain WAL iterator: %s", err), http.StatusInternalServerError) return } - defer itr.Close() + defer fitr.Close() + + bitr := litestream.NewBufferedWALSegmentIterator(fitr) + + // Peek at first position to see if client is too old. + if info, ok := bitr.Peek(); !ok { + s.writeError(w, r, "cannot peek WAL iterator, no segments available", http.StatusInternalServerError) + return + } else if cmp, err := litestream.ComparePos(pos, info.Pos()); err != nil { + s.writeError(w, r, fmt.Sprintf("cannot compare pos: %s", err), http.StatusInternalServerError) + return + } else if cmp == -1 { + s.Logger.Printf("stream position no longer available, using primary position: client.pos=%s", pos) + pos, snapshotRequired = dbPos, true + } + + s.Logger.Printf("stream connected: pos=%s snapshot=%v", pos, snapshotRequired) + defer s.Logger.Printf("stream disconnected") // Write snapshot to response body. - if err := db.WithFile(func(f *os.File) error { - fi, err := f.Stat() - if err != nil { - return err - } + if snapshotRequired { + if err := db.WithFile(func(f *os.File) error { + fi, err := f.Stat() + if err != nil { + return err + } - // Write snapshot header with current position & size. - hdr := litestream.StreamRecordHeader{ - Type: litestream.StreamRecordTypeSnapshot, - Generation: pos.Generation, - Index: pos.Index, - Size: fi.Size(), - } - if buf, err := hdr.MarshalBinary(); err != nil { - return fmt.Errorf("marshal snapshot stream record header: %w", err) - } else if _, err := w.Write(buf); err != nil { - return fmt.Errorf("write snapshot stream record header: %w", err) - } + // Write snapshot header with current position & size. + hdr := litestream.StreamRecordHeader{ + Type: litestream.StreamRecordTypeSnapshot, + Generation: pos.Generation, + Index: pos.Index, + Size: fi.Size(), + } + if buf, err := hdr.MarshalBinary(); err != nil { + return fmt.Errorf("marshal snapshot stream record header: %w", err) + } else if _, err := w.Write(buf); err != nil { + return fmt.Errorf("write snapshot stream record header: %w", err) + } + + if _, err := io.CopyN(w, f, fi.Size()); err != nil { + return fmt.Errorf("copy snapshot: %w", err) + } - if _, err := io.CopyN(w, f, fi.Size()); err != nil { - return fmt.Errorf("copy snapshot: %w", err) + return nil + }); err != nil { + s.writeError(w, r, err.Error(), http.StatusInternalServerError) + return } - return nil - }); err != nil { - s.writeError(w, r, err.Error(), http.StatusInternalServerError) - return + // Flush after snapshot has been written. + w.(http.Flusher).Flush() } - // Flush after snapshot has been written. - w.(http.Flusher).Flush() - for { // Wait for notification of new entries. 
select { case <-r.Context().Done(): return - case <-itr.NotifyCh(): + case <-fitr.NotifyCh(): } - for itr.Next() { - info := itr.WALSegment() + for bitr.Next() { + info := bitr.WALSegment() // Skip any segments before our initial position. if cmp, err := litestream.ComparePos(info.Pos(), pos); err != nil { @@ -256,7 +289,7 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // Flush after WAL segment has been written. w.(http.Flusher).Flush() } - if itr.Err() != nil { + if bitr.Err() != nil { s.Logger.Printf("wal iterator error: %s", err) return } diff --git a/integration/cmd_test.go b/integration/cmd_test.go index 3cab314c..96b85581 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -454,6 +454,98 @@ func TestCmd_Replicate_HTTP(t *testing.T) { killLitestreamCmd(t, cmd0, stdout0) } +// Ensure a database can recover when disconnected from HTTP. +func TestCmd_Replicate_HTTP_Recovery(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "http-recovery"), t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil { + t.Fatal(err) + } else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil { + t.Fatal(err) + } + + env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir} + env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10002"} + + cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml")) + if err := cmd0.Start(); err != nil { + t.Fatal(err) + } + cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml")) + if err := cmd1.Start(); err != nil { + t.Fatal(err) + } + + db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db0.Close() + + var index int + insertAndWait := func() { + index++ + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", index) + if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, index); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + } + + // Execute writes periodically. + for i := 0; i < 50; i++ { + insertAndWait() + } + + // Kill the replica. + t.Logf("Killing replica...") + killLitestreamCmd(t, cmd1, stdout1) + t.Logf("Replica killed") + + // Keep writing. + for i := 0; i < 25; i++ { + insertAndWait() + } + + // Restart replica. + t.Logf("Restarting replica...") + cmd1, stdout1, _ = commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml")) + if err := cmd1.Start(); err != nil { + t.Fatal(err) + } + t.Logf("Replica restarted") + + // Continue writing... + for i := 0; i < 25; i++ { + insertAndWait() + } + + // Wait for replica to catch up. + time.Sleep(1 * time.Second) + + // Verify count in replica table. + db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db")) + if err != nil { + t.Fatal(err) + } + defer db1.Close() + + var n int + if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil { + t.Fatal(err) + } else if got, want := n, 100; got != want { + t.Fatalf("replica count=%d, want %d", got, want) + } + + // Stop & wait for Litestream command. 
+ killLitestreamCmd(t, cmd1, stdout1) // kill + killLitestreamCmd(t, cmd0, stdout0) +} + // commandContext returns a "litestream" command with stdout/stderr buffers. func commandContext(ctx context.Context, env []string, arg ...string) (cmd *exec.Cmd, stdout, stderr *internal.LockingBuffer) { cmd = exec.CommandContext(ctx, "litestream", arg...) diff --git a/integration/testdata/replicate/http-recovery/litestream.0.yml b/integration/testdata/replicate/http-recovery/litestream.0.yml new file mode 100644 index 00000000..41c7b1b3 --- /dev/null +++ b/integration/testdata/replicate/http-recovery/litestream.0.yml @@ -0,0 +1,5 @@ +addr: :10002 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 10 diff --git a/integration/testdata/replicate/http-recovery/litestream.1.yml b/integration/testdata/replicate/http-recovery/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http-recovery/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/litestream.go b/litestream.go index a367b33a..0e9509c2 100644 --- a/litestream.go +++ b/litestream.go @@ -10,6 +10,7 @@ import ( "math" "os" "path/filepath" + "regexp" "strconv" "strings" "time" @@ -191,6 +192,49 @@ func (itr *WALSegmentInfoSliceIterator) WALSegment() WALSegmentInfo { return itr.a[0] } +type BufferedWALSegmentIterator struct { + itr WALSegmentIterator + buffered bool +} + +// NewBufferedWALSegmentIterator returns a new instance of BufferedWALSegmentIterator. +func NewBufferedWALSegmentIterator(itr WALSegmentIterator) *BufferedWALSegmentIterator { + return &BufferedWALSegmentIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *BufferedWALSegmentIterator) Close() error { + return itr.itr.Close() +} + +// Peek returns the next segment without moving the iterator forward. +func (itr *BufferedWALSegmentIterator) Peek() (info WALSegmentInfo, ok bool) { + if !itr.Next() { + return WALSegmentInfo{}, false + } + itr.buffered = true + return itr.itr.WALSegment(), true +} + +// Next returns the next segment. If buffer is full, this call is a no-op. +func (itr *BufferedWALSegmentIterator) Next() bool { + if itr.buffered { + itr.buffered = false + return true + } + return itr.itr.Next() +} + +// Returns an error that occurred during iteration. +func (itr *BufferedWALSegmentIterator) Err() error { + return itr.itr.Err() +} + +// Returns metadata for the currently positioned WAL segment file. +func (itr *BufferedWALSegmentIterator) WALSegment() WALSegmentInfo { + return itr.itr.WALSegment() +} + // SnapshotInfo represents file information about a snapshot. type SnapshotInfo struct { Generation string @@ -302,6 +346,32 @@ type Pos struct { Offset int64 // offset within wal file } +// ParsePos parses a position generated by Pos.String(). +func ParsePos(s string) (Pos, error) { + a := posRegex.FindStringSubmatch(s) + if a == nil { + return Pos{}, fmt.Errorf("invalid pos: %q", s) + } + + index, err := ParseIndex(a[2]) + if err != nil { + return Pos{}, err + } + + offset, err := ParseOffset(a[3]) + if err != nil { + return Pos{}, err + } + + return Pos{ + Generation: a[1], + Index: index, + Offset: offset, + }, nil +} + +var posRegex = regexp.MustCompile(`^(\w+)/(\w+):(\w+)$`) + // String returns a string representation. 
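As a quick sketch (not part of the patch) of the Peek/Next contract, mirroring the unit test added below; the slice iterator is the existing NewWALSegmentInfoSliceIterator helper:

    package example

    import (
        "fmt"

        "github.com/benbjohnson/litestream"
    )

    // peekThenIterate shows that Peek buffers one element without consuming it:
    // the first Next() after a Peek() returns that same segment.
    func peekThenIterate() {
        segments := []litestream.WALSegmentInfo{{Index: 1}, {Index: 2}}
        itr := litestream.NewBufferedWALSegmentIterator(
            litestream.NewWALSegmentInfoSliceIterator(segments))
        defer itr.Close()

        if info, ok := itr.Peek(); ok {
            fmt.Println("peeked index:", info.Index) // 1
        }
        for itr.Next() {
            fmt.Println("iterated index:", itr.WALSegment().Index) // 1, then 2
        }
        if err := itr.Err(); err != nil {
            fmt.Println("iteration error:", err)
        }
    }
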
func (p Pos) String() string { if p.IsZero() { @@ -524,7 +594,9 @@ func (hdr *StreamRecordHeader) UnmarshalBinary(data []byte) error { // StreamClient represents a client for streaming changes to a replica DB. type StreamClient interface { - Stream(ctx context.Context) (StreamReader, error) + // Stream returns a reader which contains and optional snapshot followed + // by a series of WAL segments. This stream begins from the given position. + Stream(ctx context.Context, pos Pos) (StreamReader, error) } // StreamReader represents a reader that streams snapshot and WAL records. diff --git a/litestream_test.go b/litestream_test.go index 2be6ba27..860b6588 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -52,6 +52,78 @@ func TestFindMinSnapshotByGeneration(t *testing.T) { } } +func TestBufferedWALSegmentIterator(t *testing.T) { + t.Run("OK", func(t *testing.T) { + a := []litestream.WALSegmentInfo{{Index: 1}, {Index: 2}} + itr := litestream.NewBufferedWALSegmentIterator(litestream.NewWALSegmentInfoSliceIterator(a)) + + if info, ok := itr.Peek(); !ok { + t.Fatal("expected info") + } else if got, want := info.Index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment().Index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment().Index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + + if itr.Next() { + t.Fatal("expected eof") + } + }) + + t.Run("Empty", func(t *testing.T) { + itr := litestream.NewBufferedWALSegmentIterator(litestream.NewWALSegmentInfoSliceIterator(nil)) + + if info, ok := itr.Peek(); ok { + t.Fatal("expected eof") + } else if got, want := info.Index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) +} + +func TestParsePos(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if pos, err := litestream.ParsePos("29cf4bced74e92ab/00000000000003e8:00000000000007d0"); err != nil { + t.Fatal(err) + } else if got, want := pos.Generation, "29cf4bced74e92ab"; got != want { + t.Fatalf("generation=%s, want %s", got, want) + } else if got, want := pos.Index, 1000; got != want { + t.Fatalf("index=%v, want %v", got, want) + } else if got, want := pos.Offset, 2000; got != int64(want) { + t.Fatalf("offset=%v, want %v", got, want) + } + }) + + t.Run("ErrMismatch", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab-00000000000003e8-00000000000007d0") + if err == nil || err.Error() != `invalid pos: "29cf4bced74e92ab-00000000000003e8-00000000000007d0"` { + t.Fatal(err) + } + }) + t.Run("ErrInvalidIndex", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab/0000000000000xxx:00000000000007d0") + if err == nil || err.Error() != `cannot parse index: "0000000000000xxx"` { + t.Fatal(err) + } + }) + t.Run("ErrInvalidIndex", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab/00000000000003e8:0000000000000xxx") + if err == nil || err.Error() != `cannot parse offset: "0000000000000xxx"` { + t.Fatal(err) + } + }) +} + func decodeHexString(tb testing.TB, s string) []byte { tb.Helper() From f53857e1ada85b0b5b61230aadc1ceae053b44fc Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Mon, 4 Apr 2022 21:18:05 -0600 Subject: [PATCH 76/95] Add minimum shadow WAL retention --- cmd/litestream/main.go | 4 + db.go | 27 ++++-- integration/cmd_test.go | 97 ++++++++++++++++++- .../http-full-recovery/litestream.0.yml | 6 ++ 
.../litestream.1.yml | 0 .../litestream.0.yml | 0 .../http-partial-recovery/litestream.1.yml | 5 + litestream.go | 2 +- 8 files changed, 131 insertions(+), 10 deletions(-) create mode 100644 integration/testdata/replicate/http-full-recovery/litestream.0.yml rename integration/testdata/replicate/{http-recovery => http-full-recovery}/litestream.1.yml (100%) rename integration/testdata/replicate/{http-recovery => http-partial-recovery}/litestream.0.yml (100%) create mode 100644 integration/testdata/replicate/http-partial-recovery/litestream.1.yml diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index d6d145e8..3cf5a0ea 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -289,6 +289,7 @@ type DBConfig struct { CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` + ShadowRetentionN *int `yaml:"shadow-retention-count"` Replicas []*ReplicaConfig `yaml:"replicas"` } @@ -330,6 +331,9 @@ func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) if dbc.MaxCheckpointPageN != nil { db.MaxCheckpointPageN = *dbc.MaxCheckpointPageN } + if dbc.ShadowRetentionN != nil { + db.ShadowRetentionN = *dbc.ShadowRetentionN + } // Instantiate and attach replicas. for _, rc := range dbc.Replicas { diff --git a/db.go b/db.go index b244698a..160cd474 100644 --- a/db.go +++ b/db.go @@ -33,6 +33,7 @@ const ( DefaultMinCheckpointPageN = 1000 DefaultMaxCheckpointPageN = 10000 + DefaultShadowRetentionN = 32 ) // MaxIndex is the maximum possible WAL index. @@ -102,6 +103,10 @@ type DB struct { // unbounded if there are always read transactions occurring. MaxCheckpointPageN int + // Number of shadow WAL indexes to retain. This keeps files long enough for + // live replicas to retrieve the data but allows files to eventually be removed. + ShadowRetentionN int + // Time after receiving change notification before reading next WAL segment. // Used for batching changes into fewer files instead of every transaction // creating its own file. @@ -129,6 +134,7 @@ func NewDB(path string) *DB { MinCheckpointPageN: DefaultMinCheckpointPageN, MaxCheckpointPageN: DefaultMaxCheckpointPageN, + ShadowRetentionN: DefaultShadowRetentionN, MonitorDelayInterval: DefaultMonitorDelayInterval, CheckpointInterval: DefaultCheckpointInterval, @@ -778,21 +784,26 @@ func (db *DB) cleanWAL(ctx context.Context) error { generation, err := db.CurrentGeneration() if err != nil { return fmt.Errorf("current generation: %w", err) + } else if generation == "" { + return nil } // Determine lowest index that's been replicated to all replicas. - minIndex := -1 + minReplicaIndex := -1 for _, r := range db.Replicas { pos := r.Pos().Truncate() if pos.Generation != generation { continue // different generation, skip - } else if minIndex == -1 || pos.Index < minIndex { - minIndex = pos.Index + } else if minReplicaIndex == -1 || pos.Index < minReplicaIndex { + minReplicaIndex = pos.Index } } - // Skip if our lowest position is too small. - if minIndex <= 0 { + // Retain a certain number of WAL indexes behind the current position so live replicas can still read them. + minRetentionIndex := db.pos.Index - db.ShadowRetentionN + + // Skip if we have replicas but none have replicated this generation yet. 
+ if len(db.Replicas) > 0 && minReplicaIndex <= 0 { return nil } @@ -807,8 +818,10 @@ func (db *DB) cleanWAL(ctx context.Context) error { index, err := ParseIndex(ent.Name()) if err != nil { continue - } else if index >= minIndex { - continue // not below min, skip + } else if len(db.Replicas) > 0 && index >= minReplicaIndex { + continue // not replicated yet, skip + } else if index >= minRetentionIndex { + continue // retain certain number of indexes, skip } if err := os.RemoveAll(filepath.Join(dir, FormatIndex(index))); err != nil { diff --git a/integration/cmd_test.go b/integration/cmd_test.go index 96b85581..62ee336b 100644 --- a/integration/cmd_test.go +++ b/integration/cmd_test.go @@ -455,9 +455,102 @@ func TestCmd_Replicate_HTTP(t *testing.T) { } // Ensure a database can recover when disconnected from HTTP. -func TestCmd_Replicate_HTTP_Recovery(t *testing.T) { +func TestCmd_Replicate_HTTP_PartialRecovery(t *testing.T) { ctx := context.Background() - testDir, tempDir := filepath.Join("testdata", "replicate", "http-recovery"), t.TempDir() + testDir, tempDir := filepath.Join("testdata", "replicate", "http-partial-recovery"), t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil { + t.Fatal(err) + } else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil { + t.Fatal(err) + } + + env0 := []string{"LITESTREAM_TEMPDIR=" + tempDir} + env1 := []string{"LITESTREAM_TEMPDIR=" + tempDir, "LITESTREAM_UPSTREAM_URL=http://localhost:10002"} + + cmd0, stdout0, _ := commandContext(ctx, env0, "replicate", "-config", filepath.Join(testDir, "litestream.0.yml")) + if err := cmd0.Start(); err != nil { + t.Fatal(err) + } + cmd1, stdout1, _ := commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml")) + if err := cmd1.Start(); err != nil { + t.Fatal(err) + } + + db0, err := sql.Open("sqlite3", filepath.Join(tempDir, "0", "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db0.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db0.Close() + + var index int + insertAndWait := func() { + index++ + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", index) + if _, err := db0.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, index); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + } + + // Execute writes periodically. + for i := 0; i < 50; i++ { + insertAndWait() + } + + // Kill the replica. + t.Logf("Killing replica...") + killLitestreamCmd(t, cmd1, stdout1) + t.Logf("Replica killed") + + // Keep writing. + for i := 0; i < 25; i++ { + insertAndWait() + } + + // Restart replica. + t.Logf("Restarting replica...") + cmd1, stdout1, _ = commandContext(ctx, env1, "replicate", "-config", filepath.Join(testDir, "litestream.1.yml")) + if err := cmd1.Start(); err != nil { + t.Fatal(err) + } + t.Logf("Replica restarted") + + // Continue writing... + for i := 0; i < 25; i++ { + insertAndWait() + } + + // Wait for replica to catch up. + time.Sleep(1 * time.Second) + + // Verify count in replica table. 
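Restated as a standalone sketch (not part of the patch), the pruning rule that cleanWAL now applies is: an index may be removed only if every replica has moved past it (when replicas are attached) and it falls outside the shadow retention window. The helper below is hypothetical and only mirrors those checks:

    package example

    // removableIndex reports whether a shadow WAL index can be pruned: it must
    // be behind every replica position for the current generation (when any
    // replicas exist) and behind the retention cutoff derived from the current
    // index and ShadowRetentionN.
    func removableIndex(index, currentIndex, shadowRetentionN, minReplicaIndex int, hasReplicas bool) bool {
        if hasReplicas && index >= minReplicaIndex {
            return false // a replica still needs this index
        }
        minRetentionIndex := currentIndex - shadowRetentionN
        if index >= minRetentionIndex {
            return false // keep recent indexes for live read replicas
        }
        return true
    }
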
+ db1, err := sql.Open("sqlite3", filepath.Join(tempDir, "1", "db")) + if err != nil { + t.Fatal(err) + } + defer db1.Close() + + var n int + if err := db1.QueryRowContext(ctx, `SELECT COUNT(*) FROM t`).Scan(&n); err != nil { + t.Fatal(err) + } else if got, want := n, 100; got != want { + t.Fatalf("replica count=%d, want %d", got, want) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd1, stdout1) // kill + killLitestreamCmd(t, cmd0, stdout0) +} + +// Ensure a database can recover when disconnected from HTTP but when last index +// is no longer available. +func TestCmd_Replicate_HTTP_FullRecovery(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "http-full-recovery"), t.TempDir() if err := os.Mkdir(filepath.Join(tempDir, "0"), 0777); err != nil { t.Fatal(err) } else if err := os.Mkdir(filepath.Join(tempDir, "1"), 0777); err != nil { diff --git a/integration/testdata/replicate/http-full-recovery/litestream.0.yml b/integration/testdata/replicate/http-full-recovery/litestream.0.yml new file mode 100644 index 00000000..88dea072 --- /dev/null +++ b/integration/testdata/replicate/http-full-recovery/litestream.0.yml @@ -0,0 +1,6 @@ +addr: :10002 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 5 + shadow-retention-count: 3 diff --git a/integration/testdata/replicate/http-recovery/litestream.1.yml b/integration/testdata/replicate/http-full-recovery/litestream.1.yml similarity index 100% rename from integration/testdata/replicate/http-recovery/litestream.1.yml rename to integration/testdata/replicate/http-full-recovery/litestream.1.yml diff --git a/integration/testdata/replicate/http-recovery/litestream.0.yml b/integration/testdata/replicate/http-partial-recovery/litestream.0.yml similarity index 100% rename from integration/testdata/replicate/http-recovery/litestream.0.yml rename to integration/testdata/replicate/http-partial-recovery/litestream.0.yml diff --git a/integration/testdata/replicate/http-partial-recovery/litestream.1.yml b/integration/testdata/replicate/http-partial-recovery/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http-partial-recovery/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/litestream.go b/litestream.go index 0e9509c2..b6ca7f05 100644 --- a/litestream.go +++ b/litestream.go @@ -585,7 +585,7 @@ func (hdr *StreamRecordHeader) UnmarshalBinary(data []byte) error { } hdr.Type = int(binary.BigEndian.Uint32(data[0:4])) hdr.Flags = int(binary.BigEndian.Uint32(data[4:8])) - hdr.Generation = fmt.Sprintf("%16x", binary.BigEndian.Uint64(data[8:16])) + hdr.Generation = fmt.Sprintf("%016x", binary.BigEndian.Uint64(data[8:16])) hdr.Index = int(binary.BigEndian.Uint64(data[16:24])) hdr.Offset = int64(binary.BigEndian.Uint64(data[24:32])) hdr.Size = int64(binary.BigEndian.Uint64(data[32:40])) From 5d394bbc5713ca6dd99b1516a06649d52cb4ea2a Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 5 Apr 2022 13:48:03 -0600 Subject: [PATCH 77/95] Document -addr flag on replicate command --- cmd/litestream/replicate.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 708ca8b8..9a8650b4 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -221,6 +221,10 @@ Arguments: Executes a subcommand. 
Litestream will exit when the child process exits. Useful for simple process management. + -addr BIND_ADDR + Starts an HTTP server that reports prometheus metrics and provides + an endpoint for live read replication. (e.g. ":9090") + -no-expand-env Disables environment variable expansion in configuration file. From 80f8de4d9ed16b7ae6f607bde85d90916784de7d Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Sat, 9 Apr 2022 10:34:26 -0600 Subject: [PATCH 78/95] Fix release workflow --- .github/workflows/release.linux.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index 86541990..20e09ebc 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -23,7 +23,6 @@ jobs: - arch: amd64 cc: gcc static: true - deploy_test_runner: true - arch: arm64 cc: aarch64-linux-gnu-gcc @@ -105,6 +104,13 @@ jobs: path: dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb if-no-files-found: error + - name: Get release + id: release + uses: bruceadams/get-release@v1.2.3 + if: github.event_name == 'release' + env: + GITHUB_TOKEN: ${{ github.token }} + - name: Upload release tarball uses: actions/upload-release-asset@v1.0.2 if: github.event_name == 'release' From ca07137d3215bb1d9d62c4498083bd0274a6ad45 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Thu, 14 Apr 2022 19:17:44 -0600 Subject: [PATCH 79/95] Re-add point-in-time restore --- cmd/litestream/restore.go | 45 +++-- replica_client.go | 82 +++++++++ replica_client_test.go | 161 ++++++++++++++++++ testdata/Makefile | 5 + .../generations/0000000000000000/.gitignore | 0 testdata/index-by-timestamp/no-wal/Makefile | 6 + .../snapshots/0000000000000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/0000000000000001.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/0000000000000002.snapshot.lz4 | Bin 0 -> 93 bytes testdata/index-by-timestamp/ok/Makefile | 11 ++ .../snapshots/0000000000000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/0000000000000001.snapshot.lz4 | Bin 0 -> 93 bytes .../0000000000000000/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000000/0000000000001234.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000001/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000002/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000003/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../snapshot-later-than-wal/Makefile | 7 + .../snapshots/0000000000000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/0000000000000001.snapshot.lz4 | Bin 0 -> 93 bytes .../0000000000000000/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000000/0000000000001234.wal.lz4 | Bin 0 -> 93 bytes .../generations/0000000000000000/.gitignore | 0 .../snapshot-index-by-timestamp/ok/Makefile | 5 + .../snapshots/0000000000000000.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000000003e8.snapshot.lz4 | Bin 0 -> 93 bytes .../snapshots/00000000000007d0.snapshot.lz4 | Bin 0 -> 93 bytes testdata/wal-index-by-timestamp/ok/Makefile | 6 + .../0000000000000000/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000000/0000000000001234.wal.lz4 | Bin 0 -> 93 bytes .../0000000000000001/0000000000000000.wal.lz4 | Bin 0 -> 93 bytes 31 files changed, 318 insertions(+), 10 deletions(-) create mode 100644 testdata/index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore create mode 100644 testdata/index-by-timestamp/no-wal/Makefile create mode 100644 
testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/ok/Makefile create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 create mode 100644 testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 create mode 100644 testdata/index-by-timestamp/snapshot-later-than-wal/Makefile create mode 100644 testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 create mode 100644 testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 create mode 100644 testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 create mode 100644 testdata/snapshot-index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore create mode 100644 testdata/snapshot-index-by-timestamp/ok/Makefile create mode 100644 testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 create mode 100644 testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 create mode 100644 testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 create mode 100644 testdata/wal-index-by-timestamp/ok/Makefile create mode 100644 testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 create mode 100644 testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 create mode 100644 testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index b746b1ea..6134c0f6 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strconv" + "time" "github.com/benbjohnson/litestream" ) @@ -22,14 +23,15 @@ type RestoreCommand struct { snapshotIndex int // index of snapshot to start from // CLI options - configPath string // path to config file - noExpandEnv bool // if true, do not expand env variables in config - outputPath string // path to restore database to - replicaName string // optional, name of replica to restore from - 
generation string // optional, generation to restore - targetIndex int // optional, last WAL index to replay - ifDBNotExists bool // if true, skips restore if output path already exists - ifReplicaExists bool // if true, skips if no backups exist + configPath string // path to config file + noExpandEnv bool // if true, do not expand env variables in config + outputPath string // path to restore database to + replicaName string // optional, name of replica to restore from + generation string // optional, generation to restore + targetIndex int // optional, last WAL index to replay + timestamp time.Time // optional, restore to point-in-time (ISO 8601) + ifDBNotExists bool // if true, skips restore if output path already exists + ifReplicaExists bool // if true, skips if no backups exist opt litestream.RestoreOptions } @@ -53,6 +55,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { fs.StringVar(&c.replicaName, "replica", "", "replica name") fs.StringVar(&c.generation, "generation", "", "generation name") fs.Var((*indexVar)(&c.targetIndex), "index", "wal index") + timestampStr := fs.String("timestamp", "", "point-in-time restore (ISO 8601)") fs.IntVar(&c.opt.Parallelism, "parallelism", c.opt.Parallelism, "parallelism") fs.BoolVar(&c.ifDBNotExists, "if-db-not-exists", false, "") fs.BoolVar(&c.ifReplicaExists, "if-replica-exists", false, "") @@ -66,9 +69,20 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } pathOrURL := fs.Arg(0) + // Parse timestamp. + if *timestampStr != "" { + if c.timestamp, err = time.Parse(time.RFC3339Nano, *timestampStr); err != nil { + return fmt.Errorf("invalid -timestamp, expected ISO 8601: %w", err) + } + } + // Ensure a generation is specified if target index is specified. - if c.targetIndex != -1 && c.generation == "" { + if c.targetIndex != -1 && !c.timestamp.IsZero() { + return fmt.Errorf("cannot specify both -index flag and -timestamp flag") + } else if c.targetIndex != -1 && c.generation == "" { return fmt.Errorf("must specify -generation flag when using -index flag") + } else if !c.timestamp.IsZero() && c.generation == "" { + return fmt.Errorf("must specify -generation flag when using -timestamp flag") } // Default to original database path if output path not specified. @@ -117,7 +131,11 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } // Determine the maximum available index for the generation if one is not specified. - if c.targetIndex == -1 { + if !c.timestamp.IsZero() { + if c.targetIndex, err = litestream.FindIndexByTimestamp(ctx, r.Client(), c.generation, c.timestamp); err != nil { + return fmt.Errorf("cannot find index for timestamp in generation %q: %w", c.generation, err) + } + } else if c.targetIndex == -1 { if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client(), c.generation); err != nil { return fmt.Errorf("cannot determine latest index in generation %q: %w", c.generation, err) } @@ -239,6 +257,10 @@ Arguments: Restore up to a specific hex-encoded WAL index (inclusive). Defaults to use the highest available index. + -timestamp DATETIME + Restore up to a specific point-in-time. Must be ISO 8601. + Cannot be specified with -index flag. + -o PATH Output path of the restored database. Defaults to original DB path. @@ -271,6 +293,9 @@ Examples: # Restore database from specific generation on S3. $ litestream restore -replica s3 -generation xxxxxxxx /path/to/db + # Restore database to a specific point in time. 
+ $ litestream restore -generation xxxxxxxx -timestamp 2000-01-01T00:00:00Z /path/to/db + `[1:], DefaultConfigPath(), ) diff --git a/replica_client.go b/replica_client.go index ba6d4481..46af8501 100644 --- a/replica_client.go +++ b/replica_client.go @@ -234,6 +234,88 @@ func ReplicaClientTimeBounds(ctx context.Context, client ReplicaClient) (min, ma return min, max, nil } +// FindIndexByTimestamp returns the highest index before a given point-in-time +// within a generation. Returns ErrNoSnapshots if no index exists on the replica +// for the generation. +func FindIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + snapshotIndex, err := FindSnapshotIndexByTimestamp(ctx, client, generation, timestamp) + if err == ErrNoSnapshots { + return 0, err + } else if err != nil { + return 0, fmt.Errorf("max snapshot index: %w", err) + } + + // Determine the highest available WAL index. + walIndex, err := FindWALIndexByTimestamp(ctx, client, generation, timestamp) + if err != nil && err != ErrNoWALSegments { + return 0, fmt.Errorf("max wal index: %w", err) + } + + // Use snapshot index if it's after the last WAL index. + if snapshotIndex > walIndex { + return snapshotIndex, nil + } + return walIndex, nil +} + +// FindSnapshotIndexByTimestamp returns the highest snapshot index before timestamp. +// Returns ErrNoSnapshots if no snapshots exist for the generation on the replica. +func FindSnapshotIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over snapshots to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.Snapshot(); info.CreatedAt.After(timestamp) { + continue + } else if info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Return an error if no snapshots were found. + if n == 0 { + return 0, ErrNoSnapshots + } + return index, nil +} + +// FindWALIndexByTimestamp returns the highest WAL index before timestamp. +// Returns ErrNoWALSegments if no segments exist for the generation on the replica. +func FindWALIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + itr, err := client.WALSegments(ctx, generation) + if err != nil { + return 0, fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over WAL segments to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.WALSegment(); info.CreatedAt.After(timestamp) { + continue + } else if info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("wal segment iteration: %w", err) + } + + // Return an error if no WAL segments were found. + if n == 0 { + return 0, ErrNoWALSegments + } + return index, nil +} + // FindMaxIndexByGeneration returns the last index within a generation. // Returns ErrNoSnapshots if no index exists on the replica for the generation. 
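As a usage sketch (not part of the patch), resolving a point-in-time target to a WAL index against a file replica, mirroring the tests added below; the replica path and generation are placeholders:

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/benbjohnson/litestream"
    )

    // indexAt resolves the WAL index to restore for a point-in-time target.
    func indexAt(ctx context.Context, replicaPath, generation string, target time.Time) (int, error) {
        client := litestream.NewFileReplicaClient(replicaPath)

        index, err := litestream.FindIndexByTimestamp(ctx, client, generation, target)
        if err != nil {
            return 0, fmt.Errorf("find index by timestamp: %w", err)
        }
        return index, nil
    }

    // Example call: indexAt(ctx, "/mnt/backups/db", "29cf4bced74e92ab", time.Now().Add(-time.Hour))
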
func FindMaxIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { diff --git a/replica_client_test.go b/replica_client_test.go index 83117b28..37f92d2d 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -489,6 +489,167 @@ func TestFindMaxIndexByGeneration(t *testing.T) { }) } +func TestFindSnapshotIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-index-by-timestamp", "ok")) + if index, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 0x000007d0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-index-by-timestamp", "no-snapshots")) + + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshotIteration", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + +func TestFindWALIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-index-by-timestamp", "ok")) + if index, err := litestream.FindWALIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-index-by-timestamp", "no-wal")) + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), &client, "0000000000000000", 
time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegmentIteration", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil + } + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + +func TestFindIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "ok")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 4, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000002; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("NoWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "no-wal")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("SnapshotLaterThanWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "snapshot-later-than-wal")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "no-snapshots")) + + _, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `max snapshot index: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return litestream.NewSnapshotInfoSliceIterator([]litestream.SnapshotInfo{{Index: 0x00000001}}), nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindIndexByTimestamp(context.Background(), &client, 
"0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `max wal index: wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + func TestRestore(t *testing.T) { t.Run("OK", func(t *testing.T) { testDir := filepath.Join("testdata", "restore", "ok") diff --git a/testdata/Makefile b/testdata/Makefile index b87ebd50..504fe254 100644 --- a/testdata/Makefile +++ b/testdata/Makefile @@ -1,8 +1,13 @@ .PHONY: default default: make -C find-latest-generation/ok + make -C index-by-timestamp/no-wal + make -C index-by-timestamp/ok + make -C index-by-timestamp/snapshot-later-than-wal make -C generation-time-bounds/ok make -C generation-time-bounds/snapshots-only make -C replica-client-time-bounds/ok make -C snapshot-time-bounds/ok + make -C snapshot-index-by-timestamp/ok make -C wal-time-bounds/ok + make -C wal-index-by-timestamp/ok diff --git a/testdata/index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore b/testdata/index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/index-by-timestamp/no-wal/Makefile b/testdata/index-by-timestamp/no-wal/Makefile new file mode 100644 index 00000000..87751339 --- /dev/null +++ b/testdata/index-by-timestamp/no-wal/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 + diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/Makefile b/testdata/index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..258f1e8c --- /dev/null +++ b/testdata/index-by-timestamp/ok/Makefile @@ -0,0 +1,11 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 
generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001050000 generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 + diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git 
a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile b/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile new file mode 100644 index 00000000..9e0d3908 --- /dev/null +++ b/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore b/testdata/snapshot-index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b 
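The fixtures above pin file modification times with touch so that the timestamp-lookup helpers exercised in replica_client_test.go (FindSnapshotIndexByTimestamp, FindWALIndexByTimestamp, FindIndexByTimestamp) have known metadata to scan. The helpers themselves are not shown in this patch, so the following is only a rough, self-contained sketch of the selection rule the fixtures and tests imply (pick the largest index created at or before the target time), using hypothetical types rather than Litestream's own:

package main

import (
	"fmt"
	"time"
)

// indexInfo is a stand-in for snapshot/WAL-segment metadata; the real
// litestream.SnapshotInfo/WALSegmentInfo types are not reproduced here.
type indexInfo struct {
	Index     int
	CreatedAt time.Time
}

// maxIndexAt returns the largest index created at or before t, which is the
// behavior the "ok" fixtures above are arranged to verify. ok is false when
// no entry qualifies (the "no-snapshots"/"no-wal" cases).
func maxIndexAt(infos []indexInfo, t time.Time) (index int, ok bool) {
	for _, info := range infos {
		if info.CreatedAt.After(t) {
			continue
		}
		if !ok || info.Index > index {
			index, ok = info.Index, true
		}
	}
	return index, ok
}

func main() {
	// Mirrors the snapshot-index-by-timestamp/ok fixture: indexes 0x0, 0x3e8,
	// and 0x7d0 created a day apart.
	infos := []indexInfo{
		{Index: 0x0000, CreatedAt: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{Index: 0x03e8, CreatedAt: time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC)},
		{Index: 0x07d0, CreatedAt: time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)},
	}
	idx, ok := maxIndexAt(infos, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC))
	fmt.Println(idx, ok) // 2000 true (0x7d0), matching the test's expectation
}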
diff --git a/testdata/snapshot-index-by-timestamp/ok/Makefile b/testdata/snapshot-index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..a11b6dbd --- /dev/null +++ b/testdata/snapshot-index-by-timestamp/ok/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-index-by-timestamp/ok/Makefile b/testdata/wal-index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..40d692ff --- /dev/null +++ b/testdata/wal-index-by-timestamp/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + diff --git a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 
zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 diff --git a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 0000000000000000000000000000000000000000..7536340954b2a683272e3ed74900762ffd86afcf GIT binary patch literal 93 zcmZQk@|8$&SnkEZ!0?$jIM64vBvm0TzbH4cM8TLrfPsmL!9hU*D9Omez|X{>nZU@P bXQIIC#2_HR3KIB_3OWOTN+Dp*_P@^oU0EHA literal 0 HcmV?d00001 From 301e1172fd0bb6d425fc3a6536b06d4444faa3a5 Mon Sep 17 00:00:00 2001 From: Michael Lynch Date: Sun, 17 Apr 2022 15:47:54 -0400 Subject: [PATCH 80/95] Add Go code coverage to CI --- .github/workflows/build_and_test.yml | 7 ++++++- .gitignore | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 34565a7f..bc0d157f 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -22,4 +22,9 @@ jobs: run: go install ./cmd/litestream - name: Run unit tests - run: make testdata && go test -v ./... + run: make testdata && go test -v --coverprofile=.coverage.out ./... && go tool cover -html .coverage.out -o .coverage.html + + - uses: actions/upload-artifact@v3 + with: + name: code-coverage + path: coverage.html diff --git a/.gitignore b/.gitignore index 6acf8819..7f08fdd2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ +.coverage.* .DS_Store /dist From 6763e9218c02de5c8ddf4b1efcdca0b7b37c4e78 Mon Sep 17 00:00:00 2001 From: Michael Lynch Date: Sun, 17 Apr 2022 15:51:00 -0400 Subject: [PATCH 81/95] Fix path to coverage file --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index bc0d157f..43fb62e8 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -27,4 +27,4 @@ jobs: - uses: actions/upload-artifact@v3 with: name: code-coverage - path: coverage.html + path: .coverage.html From 88737d7164ce83a3dae1c3398e0d1c5d8def01da Mon Sep 17 00:00:00 2001 From: Michael Lynch Date: Sun, 17 Apr 2022 16:12:12 -0400 Subject: [PATCH 82/95] Add a unit test for internal.MD5Hash --- internal/internal_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/internal/internal_test.go b/internal/internal_test.go index 11d0f6d9..308c6df4 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -102,6 +102,24 @@ func TestTruncateDuration(t *testing.T) { } } +func TestMD5Hash(t *testing.T) { + for _, tt := range []struct { + input []byte + output string + }{ + {[]byte{}, "d41d8cd98f00b204e9800998ecf8427e"}, + {[]byte{0x0}, "93b885adfe0da089cdf634904fd59f71"}, + {[]byte{0x0, 0x1, 0x2, 0x3}, "37b59afd592725f9305e484a5d7f5168"}, + {[]byte("Hello, world!"), "6cd3556deb0da54bca060b4c39479839"}, + } { + t.Run(fmt.Sprintf("%v", tt.input), func(t *testing.T) { + if got, want := internal.MD5Hash(tt.input), tt.output; got != want { + t.Fatalf("hash=%s, want %s", got, want) + } + }) + } +} + func TestOnceCloser(t *testing.T) { var closed bool var rc = &mock.ReadCloser{ From 7d8b8c6ec08caff0c1e5d8c729cfb327a96b0bd6 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Tue, 3 May 2022 07:28:14 -0600 Subject: [PATCH 83/95] Remove verbose flag from restore docs --- 
cmd/litestream/restore.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 6134c0f6..e4324812 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -275,9 +275,6 @@ Arguments: Determines the number of WAL files downloaded in parallel. Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`. - -v - Verbose output. - Examples: From e6f7c6052d84b7265fd54d3a3ab33208948e126b Mon Sep 17 00:00:00 2001 From: Hiroaki Nakamura Date: Tue, 10 May 2022 19:18:31 +0900 Subject: [PATCH 84/95] Add two environments for overriding endpoint and region export LITESTREAM_ACCESS_KEY_ID=your_key_id export LITESTREAM_SECRET_ACCESS_KEY=your_access_key export LITESTREAM_ENDPOINT=your_endpoint export LITESTREAM_REGION=your_region litestream replicate fruits.db s3://mybkt/fruits.db --- s3/replica_client.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/s3/replica_client.go b/s3/replica_client.go index 182d713f..d5512888 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -703,6 +703,13 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) endpoint = net.JoinHostPort(endpoint, port) } + if e := os.Getenv("LITESTREAM_ENDPOINT"); e != "" { + endpoint = e + } + if r := os.Getenv("LITESTREAM_REGION"); r != "" { + region = r + } + // Prepend scheme to endpoint. if endpoint != "" { endpoint = scheme + "://" + endpoint From 46597ab22fff45cc35dd76ce5e15c76fc7d3befc Mon Sep 17 00:00:00 2001 From: Hiroaki Nakamura Date: Fri, 13 May 2022 23:23:22 +0900 Subject: [PATCH 85/95] Fix wal internal error log --- http/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http/server.go b/http/server.go index dbc0a617..702b2d66 100644 --- a/http/server.go +++ b/http/server.go @@ -289,7 +289,7 @@ func (s *Server) handleGetStream(w http.ResponseWriter, r *http.Request) { // Flush after WAL segment has been written. w.(http.Flusher).Flush() } - if bitr.Err() != nil { + if err := bitr.Err(); err != nil { s.Logger.Printf("wal iterator error: %s", err) return } From 98673c67851c1f009e21ec6c21198bda76bf23e6 Mon Sep 17 00:00:00 2001 From: Hiroaki Nakamura Date: Sat, 14 May 2022 00:28:57 +0900 Subject: [PATCH 86/95] Add environment variables for scheme and forcePathStyle --- s3/replica_client.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/s3/replica_client.go b/s3/replica_client.go index d5512888..6e6696b2 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -10,6 +10,7 @@ import ( "os" "path" "regexp" + "strconv" "strings" "sync" "time" @@ -703,12 +704,26 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) endpoint = net.JoinHostPort(endpoint, port) } + if s := os.Getenv("LITESTREAM_SCHEME"); s != "" { + if s != "https" && s != "http" { + panic(fmt.Sprintf("Unsupported LITESTREAM_SCHEME value: %q", s)) + } else { + scheme = s + } + } if e := os.Getenv("LITESTREAM_ENDPOINT"); e != "" { endpoint = e } if r := os.Getenv("LITESTREAM_REGION"); r != "" { region = r } + if s := os.Getenv("LITESTREAM_FORCE_PATH_STYLE"); s != "" { + if b, err := strconv.ParseBool(s); err != nil { + panic(fmt.Sprintf("Invalid LITESTREAM_FORCE_PATH_STYLE value: %q", s)) + } else { + forcePathStyle = b + } + } // Prepend scheme to endpoint. 
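// A sketch, not part of this patch series: one way the new LITESTREAM_*
// overrides could be exercised from a test in the s3 package. ParseHost's
// signature is taken from the hunk header above; the endpoint value, the
// same-package test layout, and the implied `import "testing"` are
// assumptions. t.Setenv requires Go 1.17 or later.
func TestParseHostEnvOverrides(t *testing.T) {
	t.Setenv("LITESTREAM_REGION", "us-east-1")
	t.Setenv("LITESTREAM_FORCE_PATH_STYLE", "true")
	t.Setenv("LITESTREAM_ENDPOINT", "minio.internal:9000") // hypothetical endpoint

	_, region, endpoint, forcePathStyle := ParseHost("mybkt")
	if region != "us-east-1" || !forcePathStyle || endpoint == "" {
		t.Fatalf("region=%q endpoint=%q forcePathStyle=%v", region, endpoint, forcePathStyle)
	}
}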
if endpoint != "" { From 2c0dce21fa23ad91bb5e0e5c819807589c40b704 Mon Sep 17 00:00:00 2001 From: Yasuhiro Matsumoto Date: Sat, 7 May 2022 00:57:52 +0900 Subject: [PATCH 87/95] Use fsnotify --- go.mod | 1 + go.sum | 4 +- internal/file_watcher.go | 36 ---- internal/file_watcher_bsd.go | 259 ----------------------- internal/file_watcher_linux.go | 369 --------------------------------- internal/file_watcher_test.go | 211 ------------------- internal/internal.go | 9 - server.go | 33 ++- 8 files changed, 26 insertions(+), 896 deletions(-) delete mode 100644 internal/file_watcher.go delete mode 100644 internal/file_watcher_bsd.go delete mode 100644 internal/file_watcher_linux.go delete mode 100644 internal/file_watcher_test.go diff --git a/go.mod b/go.mod index 3f5b3d90..58f7c80c 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go/storage v1.20.0 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/aws/aws-sdk-go v1.42.53 + github.com/fsnotify/fsnotify v1.5.1 github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-sqlite3 v1.14.12 github.com/pierrec/lz4/v4 v4.1.14 diff --git a/go.sum b/go.sum index 895540f1..a4d13895 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -236,8 +238,6 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ= -github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= diff --git a/internal/file_watcher.go b/internal/file_watcher.go deleted file mode 100644 index 501703d4..00000000 --- a/internal/file_watcher.go +++ /dev/null @@ -1,36 +0,0 @@ -package internal - -import ( - "errors" -) - -// File event mask constants. -const ( - FileEventCreated = 1 << iota - FileEventModified - FileEventDeleted -) - -// FileEvent represents an event on a watched file. -type FileEvent struct { - Name string - Mask int -} - -// ErrFileEventQueueOverflow is returned when the file event queue has overflowed. 
-var ErrFileEventQueueOverflow = errors.New("file event queue overflow") - -// FileWatcher represents a watcher of file events. -type FileWatcher interface { - Open() error - Close() error - - // Returns a channel of events for watched files. - Events() <-chan FileEvent - - // Adds a specific file to be watched. - Watch(filename string) error - - // Removes a specific file from being watched. - Unwatch(filename string) error -} diff --git a/internal/file_watcher_bsd.go b/internal/file_watcher_bsd.go deleted file mode 100644 index c4852e0a..00000000 --- a/internal/file_watcher_bsd.go +++ /dev/null @@ -1,259 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin - -package internal - -import ( - "context" - "log" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sync/errgroup" - "golang.org/x/sys/unix" -) - -var _ FileWatcher = (*KqueueFileWatcher)(nil) - -// KqueueFileWatcher watches files and is notified of events on them. -// -// Watcher code based on https://github.com/fsnotify/fsnotify -type KqueueFileWatcher struct { - fd int - events chan FileEvent - - mu sync.Mutex - watches map[string]int - paths map[int]string - notExists map[string]struct{} - - g errgroup.Group - ctx context.Context - cancel func() -} - -// NewKqueueFileWatcher returns a new instance of KqueueFileWatcher. -func NewKqueueFileWatcher() *KqueueFileWatcher { - return &KqueueFileWatcher{ - events: make(chan FileEvent), - - watches: make(map[string]int), - paths: make(map[int]string), - notExists: make(map[string]struct{}), - } -} - -// NewFileWatcher returns an instance of KqueueFileWatcher on BSD systems. -func NewFileWatcher() FileWatcher { - return NewKqueueFileWatcher() -} - -// Events returns a read-only channel of file events. -func (w *KqueueFileWatcher) Events() <-chan FileEvent { - return w.events -} - -// Open initializes the watcher and begins listening for file events. -func (w *KqueueFileWatcher) Open() (err error) { - if w.fd, err = unix.Kqueue(); err != nil { - return err - } - - w.ctx, w.cancel = context.WithCancel(context.Background()) - w.g.Go(func() error { - if err := w.monitor(w.ctx); err != nil && w.ctx.Err() == nil { - return err - } - return nil - }) - w.g.Go(func() error { - if err := w.monitorNotExists(w.ctx); err != nil && w.ctx.Err() == nil { - return err - } - return nil - }) - - return nil -} - -// Close stops watching for file events and cleans up resources. -func (w *KqueueFileWatcher) Close() (err error) { - w.cancel() - - if w.fd != 0 { - if e := unix.Close(w.fd); e != nil && err == nil { - err = e - } - } - - if e := w.g.Wait(); e != nil && err == nil { - err = e - } - return err -} - -// Watch begins watching the given file or directory. -func (w *KqueueFileWatcher) Watch(filename string) error { - w.mu.Lock() - defer w.mu.Unlock() - - filename = filepath.Clean(filename) - - // If file doesn't exist, monitor separately until it does exist as we - // can't watch non-existent files with kqueue. - if _, err := os.Stat(filename); os.IsNotExist(err) { - w.notExists[filename] = struct{}{} - return nil - } - - return w.addWatch(filename) -} - -func (w *KqueueFileWatcher) addWatch(filename string) error { - wd, err := unix.Open(filename, unix.O_NONBLOCK|unix.O_RDONLY|unix.O_CLOEXEC, 0700) - if err != nil { - return err - } - - // TODO: Handle return count different than 1. 
- kevent := unix.Kevent_t{Fflags: unix.NOTE_DELETE | unix.NOTE_WRITE} - unix.SetKevent(&kevent, wd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE) - if _, err := unix.Kevent(w.fd, []unix.Kevent_t{kevent}, nil, nil); err != nil { - return err - } - - w.watches[filename] = wd - w.paths[wd] = filename - - delete(w.notExists, filename) - - return err -} - -// Unwatch stops watching the given file or directory. -func (w *KqueueFileWatcher) Unwatch(filename string) error { - w.mu.Lock() - defer w.mu.Unlock() - - filename = filepath.Clean(filename) - - // Look up watch ID by filename. - wd, ok := w.watches[filename] - if !ok { - return nil - } - - // TODO: Handle return count different than 1. - var kevent unix.Kevent_t - unix.SetKevent(&kevent, wd, unix.EVFILT_VNODE, unix.EV_DELETE) - if _, err := unix.Kevent(w.fd, []unix.Kevent_t{kevent}, nil, nil); err != nil { - return err - } - unix.Close(wd) - - delete(w.paths, wd) - delete(w.watches, filename) - delete(w.notExists, filename) - - return nil -} - -// monitorNotExist runs in a separate goroutine and monitors for the creation of -// watched files that do not yet exist. -func (w *KqueueFileWatcher) monitorNotExists(ctx context.Context) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return nil - case <-ticker.C: - w.checkNotExists(ctx) - } - } -} - -func (w *KqueueFileWatcher) checkNotExists(ctx context.Context) { - w.mu.Lock() - defer w.mu.Unlock() - - for filename := range w.notExists { - if _, err := os.Stat(filename); os.IsNotExist(err) { - continue - } - - if err := w.addWatch(filename); err != nil { - log.Printf("non-existent file monitor: cannot add watch: %s", err) - continue - } - - // Send event to channel. - select { - case w.events <- FileEvent{ - Name: filename, - Mask: FileEventCreated, - }: - default: - } - } -} - -// monitor runs in a separate goroutine and monitors the inotify event queue. -func (w *KqueueFileWatcher) monitor(ctx context.Context) error { - kevents := make([]unix.Kevent_t, 10) - timeout := unix.NsecToTimespec(int64(100 * time.Millisecond)) - - for { - n, err := unix.Kevent(w.fd, nil, kevents, &timeout) - if err != nil && err != unix.EINTR { - return err - } else if n < 0 { - continue - } - - for _, kevent := range kevents[:n] { - if err := w.recv(ctx, &kevent); err != nil { - return err - } - } - } -} - -// recv processes a single event from kqeueue. -func (w *KqueueFileWatcher) recv(ctx context.Context, kevent *unix.Kevent_t) error { - if err := ctx.Err(); err != nil { - return err - } - - // Look up filename & remove from watcher if this is a delete. - w.mu.Lock() - filename, ok := w.paths[int(kevent.Ident)] - if ok && kevent.Fflags&unix.NOTE_DELETE != 0 { - delete(w.paths, int(kevent.Ident)) - delete(w.watches, filename) - unix.Close(int(kevent.Ident)) - } - w.mu.Unlock() - - // Convert to generic file event mask. - var mask int - if kevent.Fflags&unix.NOTE_WRITE != 0 { - mask |= FileEventModified - } - if kevent.Fflags&unix.NOTE_DELETE != 0 { - mask |= FileEventDeleted - } - - // Send event to channel or wait for close. 
- select { - case <-ctx.Done(): - return ctx.Err() - case w.events <- FileEvent{ - Name: filename, - Mask: mask, - }: - return nil - } -} diff --git a/internal/file_watcher_linux.go b/internal/file_watcher_linux.go deleted file mode 100644 index 07358750..00000000 --- a/internal/file_watcher_linux.go +++ /dev/null @@ -1,369 +0,0 @@ -//go:build linux - -package internal - -import ( - "context" - "fmt" - "log" - "os" - "path/filepath" - "sync" - "time" - "unsafe" - - "golang.org/x/sync/errgroup" - "golang.org/x/sys/unix" -) - -var _ FileWatcher = (*InotifyFileWatcher)(nil) - -// InotifyFileWatcher watches files and is notified of events on them. -// -// Watcher code based on https://github.com/fsnotify/fsnotify -type InotifyFileWatcher struct { - inotify struct { - fd int - buf []byte - } - epoll struct { - fd int // epoll_create1() file descriptor - events []unix.EpollEvent - } - pipe struct { - r int // read pipe file descriptor - w int // write pipe file descriptor - } - - events chan FileEvent - - mu sync.Mutex - watches map[string]int - paths map[int]string - notExists map[string]struct{} - - g errgroup.Group - ctx context.Context - cancel func() -} - -// NewInotifyFileWatcher returns a new instance of InotifyFileWatcher. -func NewInotifyFileWatcher() *InotifyFileWatcher { - w := &InotifyFileWatcher{ - events: make(chan FileEvent), - - watches: make(map[string]int), - paths: make(map[int]string), - notExists: make(map[string]struct{}), - } - - w.inotify.buf = make([]byte, 4096*unix.SizeofInotifyEvent) - w.epoll.events = make([]unix.EpollEvent, 64) - - return w -} - -// NewFileWatcher returns an instance of InotifyFileWatcher on Linux systems. -func NewFileWatcher() FileWatcher { - return NewInotifyFileWatcher() -} - -// Events returns a read-only channel of file events. -func (w *InotifyFileWatcher) Events() <-chan FileEvent { - return w.events -} - -// Open initializes the watcher and begins listening for file events. -func (w *InotifyFileWatcher) Open() (err error) { - w.inotify.fd, err = unix.InotifyInit1(unix.IN_CLOEXEC) - if err != nil { - return fmt.Errorf("cannot init inotify: %w", err) - } - - // Initialize epoll and create a non-blocking pipe. - if w.epoll.fd, err = unix.EpollCreate1(unix.EPOLL_CLOEXEC); err != nil { - return fmt.Errorf("cannot create epoll: %w", err) - } - - pipe := []int{-1, -1} - if err := unix.Pipe2(pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC); err != nil { - return fmt.Errorf("cannot create epoll pipe: %w", err) - } - w.pipe.r, w.pipe.w = pipe[0], pipe[1] - - // Register inotify fd with epoll - if err := unix.EpollCtl(w.epoll.fd, unix.EPOLL_CTL_ADD, w.inotify.fd, &unix.EpollEvent{ - Fd: int32(w.inotify.fd), - Events: unix.EPOLLIN, - }); err != nil { - return fmt.Errorf("cannot add inotify to epoll: %w", err) - } - - // Register pipe fd with epoll - if err := unix.EpollCtl(w.epoll.fd, unix.EPOLL_CTL_ADD, w.pipe.r, &unix.EpollEvent{ - Fd: int32(w.pipe.r), - Events: unix.EPOLLIN, - }); err != nil { - return fmt.Errorf("cannot add pipe to epoll: %w", err) - } - - w.ctx, w.cancel = context.WithCancel(context.Background()) - w.g.Go(func() error { - if err := w.monitor(w.ctx); err != nil && w.ctx.Err() == nil { - return err - } - return nil - }) - w.g.Go(func() error { - if err := w.monitorNotExists(w.ctx); err != nil && w.ctx.Err() == nil { - return err - } - return nil - }) - - return nil -} - -// Close stops watching for file events and cleans up resources. 
-func (w *InotifyFileWatcher) Close() (err error) { - w.cancel() - - if e := w.wake(); e != nil && err == nil { - err = e - } - if e := w.g.Wait(); e != nil && err == nil { - err = e - } - return err -} - -// Watch begins watching the given file or directory. -func (w *InotifyFileWatcher) Watch(filename string) error { - w.mu.Lock() - defer w.mu.Unlock() - - filename = filepath.Clean(filename) - - // If file doesn't exist, monitor separately until it does exist as we - // can't watch non-existent files with inotify. - if _, err := os.Stat(filename); os.IsNotExist(err) { - w.notExists[filename] = struct{}{} - return nil - } - - return w.addWatch(filename) -} - -func (w *InotifyFileWatcher) addWatch(filename string) error { - wd, err := unix.InotifyAddWatch(w.inotify.fd, filename, unix.IN_MODIFY|unix.IN_DELETE_SELF) - if err != nil { - return err - } - - w.watches[filename] = wd - w.paths[wd] = filename - - delete(w.notExists, filename) - - return err -} - -// Unwatch stops watching the given file or directory. -func (w *InotifyFileWatcher) Unwatch(filename string) error { - w.mu.Lock() - defer w.mu.Unlock() - - filename = filepath.Clean(filename) - - // Look up watch ID by filename. - wd, ok := w.watches[filename] - if !ok { - return nil - } - - if _, err := unix.InotifyRmWatch(w.inotify.fd, uint32(wd)); err != nil { - return err - } - - delete(w.paths, wd) - delete(w.watches, filename) - delete(w.notExists, filename) - - return nil -} - -// monitorNotExist runs in a separate goroutine and monitors for the creation of -// watched files that do not yet exist. -func (w *InotifyFileWatcher) monitorNotExists(ctx context.Context) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return nil - case <-ticker.C: - w.checkNotExists(ctx) - } - } -} - -func (w *InotifyFileWatcher) checkNotExists(ctx context.Context) { - w.mu.Lock() - defer w.mu.Unlock() - - for filename := range w.notExists { - if _, err := os.Stat(filename); os.IsNotExist(err) { - continue - } - - if err := w.addWatch(filename); err != nil { - log.Printf("non-existent file monitor: cannot add watch: %s", err) - continue - } - - // Send event to channel. - select { - case w.events <- FileEvent{ - Name: filename, - Mask: FileEventCreated, - }: - default: - } - } -} - -// monitor runs in a separate goroutine and monitors the inotify event queue. -func (w *InotifyFileWatcher) monitor(ctx context.Context) error { - // Close all file descriptors once monitor exits. - defer func() { - unix.Close(w.inotify.fd) - unix.Close(w.epoll.fd) - unix.Close(w.pipe.w) - unix.Close(w.pipe.r) - }() - - for { - if err := w.wait(ctx); err != nil { - return err - } else if err := w.read(ctx); err != nil { - return err - } - } -} - -// read reads from the inotify file descriptor. Automatically rety on EINTR. 
-func (w *InotifyFileWatcher) read(ctx context.Context) error { - for { - n, err := unix.Read(w.inotify.fd, w.inotify.buf) - if err != nil && err != unix.EINTR { - return err - } else if n < 0 { - continue - } - - return w.recv(ctx, w.inotify.buf[:n]) - } -} - -func (w *InotifyFileWatcher) recv(ctx context.Context, b []byte) error { - if err := ctx.Err(); err != nil { - return err - } - - for { - if len(b) == 0 { - return nil - } else if len(b) < unix.SizeofInotifyEvent { - return fmt.Errorf("InotifyFileWatcher.recv(): inotify short record: n=%d", len(b)) - } - - event := (*unix.InotifyEvent)(unsafe.Pointer(&b[0])) - if event.Mask&unix.IN_Q_OVERFLOW != 0 { - // TODO: Change to notify all watches. - return ErrFileEventQueueOverflow - } - - // Remove deleted files from the lookups. - w.mu.Lock() - name, ok := w.paths[int(event.Wd)] - if ok && event.Mask&unix.IN_DELETE_SELF != 0 { - delete(w.paths, int(event.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - //if nameLen > 0 { - // // Point "bytes" at the first byte of the filename - // bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // // The filename is padded with NULL bytes. TrimRight() gets rid of those. - // name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - //} - - // Move to next event. - b = b[unix.SizeofInotifyEvent+event.Len:] - - // Skip event if ignored. - if event.Mask&unix.IN_IGNORED != 0 { - continue - } - - // Convert to generic file event mask. - var mask int - if event.Mask&unix.IN_MODIFY != 0 { - mask |= FileEventModified - } - if event.Mask&unix.IN_DELETE_SELF != 0 { - mask |= FileEventDeleted - } - - // Send event to channel or wait for close. - select { - case <-ctx.Done(): - return ctx.Err() - case w.events <- FileEvent{ - Name: name, - Mask: mask, - }: - } - } -} - -func (w *InotifyFileWatcher) wait(ctx context.Context) error { - for { - n, err := unix.EpollWait(w.epoll.fd, w.epoll.events, -1) - if n == 0 || err == unix.EINTR { - continue - } else if err != nil { - return err - } - - // Read events to see if we have data available on inotify or if we are awaken. - var hasData bool - for _, event := range w.epoll.events[:n] { - switch event.Fd { - case int32(w.inotify.fd): // inotify file descriptor - hasData = hasData || event.Events&(unix.EPOLLHUP|unix.EPOLLERR|unix.EPOLLIN) != 0 - - case int32(w.pipe.r): // epoll file descriptor - if _, err := unix.Read(w.pipe.r, make([]byte, 1024)); err != nil && err != unix.EAGAIN { - return fmt.Errorf("epoll pipe error: %w", err) - } - } - } - - // Check if context is closed and then exit if data is available. 
- if err := ctx.Err(); err != nil { - return err - } else if hasData { - return nil - } - } -} - -func (w *InotifyFileWatcher) wake() error { - if _, err := unix.Write(w.pipe.w, []byte{0}); err != nil && err != unix.EAGAIN { - return err - } - return nil -} diff --git a/internal/file_watcher_test.go b/internal/file_watcher_test.go deleted file mode 100644 index dd767154..00000000 --- a/internal/file_watcher_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package internal_test - -import ( - "database/sql" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/benbjohnson/litestream/internal" - _ "github.com/mattn/go-sqlite3" -) - -func TestFileWatcher(t *testing.T) { - t.Run("WriteAndRemove", func(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "db") - - w := internal.NewFileWatcher() - if err := w.Open(); err != nil { - t.Fatal(err) - } - defer w.Close() - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatal(err) - } - defer db.Close() - - if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { - t.Fatal(err) - } else if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { - t.Fatal(err) - } - - if err := w.Watch(dbPath + "-wal"); err != nil { - t.Fatal(err) - } - - // Write to the WAL file & ensure a "modified" event occurs. - if _, err := db.Exec(`INSERT INTO t (x) VALUES (1)`); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for event") - case event := <-w.Events(): - if got, want := event.Name, dbPath+"-wal"; got != want { - t.Fatalf("name=%s, want %s", got, want) - } else if got, want := event.Mask, internal.FileEventModified; got != want { - t.Fatalf("mask=0x%02x, want 0x%02x", got, want) - } - } - - // Flush any duplicate events. - drainFileEventChannel(w.Events()) - - // Close database and ensure checkpointed WAL creates a "delete" event. - if err := db.Close(); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for event") - case event := <-w.Events(): - if got, want := event.Name, dbPath+"-wal"; got != want { - t.Fatalf("name=%s, want %s", got, want) - } else if got, want := event.Mask, internal.FileEventDeleted; got != want { - t.Fatalf("mask=0x%02x, want 0x%02x", got, want) - } - } - }) - - t.Run("LargeTx", func(t *testing.T) { - w := internal.NewFileWatcher() - if err := w.Open(); err != nil { - t.Fatal(err) - } - defer w.Close() - - dbPath := filepath.Join(t.TempDir(), "db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatal(err) - } else if _, err := db.Exec(`PRAGMA cache_size = 4`); err != nil { - t.Fatal(err) - } else if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { - t.Fatal(err) - } else if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { - t.Fatal(err) - } - defer db.Close() - - if err := w.Watch(dbPath + "-wal"); err != nil { - t.Fatal(err) - } - - // Start a transaction to ensure writing large data creates multiple write events. - tx, err := db.Begin() - if err != nil { - t.Fatal(err) - } - defer func() { _ = tx.Rollback() }() - - // Write enough data to require a spill. - for i := 0; i < 100; i++ { - if _, err := tx.Exec(`INSERT INTO t (x) VALUES (?)`, strings.Repeat("x", 512)); err != nil { - t.Fatal(err) - } - } - - // Ensure spill writes to disk. 
- select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for event") - case event := <-w.Events(): - if got, want := event.Name, dbPath+"-wal"; got != want { - t.Fatalf("name=%s, want %s", got, want) - } else if got, want := event.Mask, internal.FileEventModified; got != want { - t.Fatalf("mask=0x%02x, want 0x%02x", got, want) - } - } - - // Flush any duplicate events. - drainFileEventChannel(w.Events()) - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Final commit should spill remaining pages and cause another write event. - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for event") - case event := <-w.Events(): - if got, want := event.Name, dbPath+"-wal"; got != want { - t.Fatalf("name=%s, want %s", got, want) - } else if got, want := event.Mask, internal.FileEventModified; got != want { - t.Fatalf("mask=0x%02x, want 0x%02x", got, want) - } - } - }) - - t.Run("WatchBeforeCreate", func(t *testing.T) { - dbPath := filepath.Join(t.TempDir(), "db") - - w := internal.NewFileWatcher() - if err := w.Open(); err != nil { - t.Fatal(err) - } - defer w.Close() - - if err := w.Watch(dbPath); err != nil { - t.Fatal(err) - } else if err := w.Watch(dbPath + "-wal"); err != nil { - t.Fatal(err) - } - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - t.Fatal(err) - } - defer db.Close() - - if _, err := db.Exec(`CREATE TABLE t (x)`); err != nil { - t.Fatal(err) - } - - // Wait for main database creation event. - waitForFileEvent(t, w.Events(), internal.FileEvent{Name: dbPath, Mask: internal.FileEventCreated}) - - // Write to the WAL file & ensure a "modified" event occurs. - if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil { - t.Fatal(err) - } else if _, err := db.Exec(`INSERT INTO t (x) VALUES (1)`); err != nil { - t.Fatal(err) - } - - // Wait for WAL creation event. - waitForFileEvent(t, w.Events(), internal.FileEvent{Name: dbPath + "-wal", Mask: internal.FileEventCreated}) - }) -} - -func drainFileEventChannel(ch <-chan internal.FileEvent) { - for { - select { - case <-time.After(100 * time.Millisecond): - return - case <-ch: - } - } -} - -func waitForFileEvent(tb testing.TB, ch <-chan internal.FileEvent, want internal.FileEvent) { - tb.Helper() - - timeout := time.After(10 * time.Second) - - for { - select { - case <-timeout: - tb.Fatalf("timeout waiting for event: %#v", want) - case got := <-ch: - if got == want { - return - } - } - } -} diff --git a/internal/internal.go b/internal/internal.go index 95d0f789..e4da7f8e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -176,15 +176,6 @@ func MkdirAll(path string, mode os.FileMode, uid, gid int) error { return nil } -// Fileinfo returns syscall fields from a FileInfo object. -func Fileinfo(fi os.FileInfo) (uid, gid int) { - if fi == nil { - return -1, -1 - } - stat := fi.Sys().(*syscall.Stat_t) - return int(stat.Uid), int(stat.Gid) -} - // ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. 
func ParseSnapshotPath(s string) (index int, err error) { a := snapshotPathRegex.FindStringSubmatch(s) diff --git a/server.go b/server.go index 501e81eb..f1fe7d90 100644 --- a/server.go +++ b/server.go @@ -3,10 +3,11 @@ package litestream import ( "context" "fmt" + "path/filepath" "strings" "sync" - "github.com/benbjohnson/litestream/internal" + "github.com/fsnotify/fsnotify" "golang.org/x/sync/errgroup" ) @@ -15,7 +16,7 @@ import ( type Server struct { mu sync.Mutex dbs map[string]*DB // databases by path - watcher internal.FileWatcher + watcher *fsnotify.Watcher ctx context.Context cancel func() @@ -31,8 +32,9 @@ func NewServer() *Server { // Open initializes the server and begins watching for file system events. func (s *Server) Open() error { - s.watcher = internal.NewFileWatcher() - if err := s.watcher.Open(); err != nil { + var err error + s.watcher, err = fsnotify.NewWatcher() + if err != nil { return err } @@ -110,10 +112,8 @@ func (s *Server) Watch(path string, fn func(path string) (*DB, error)) error { s.dbs[path] = db // Watch for changes on the database file & WAL. - if err := s.watcher.Watch(path); err != nil { + if err := s.watcher.Add(filepath.Dir(path)); err != nil { return fmt.Errorf("watch db file: %w", err) - } else if err := s.watcher.Watch(path + "-wal"); err != nil { - return fmt.Errorf("watch wal file: %w", err) } // Kick off an initial sync. @@ -137,7 +137,7 @@ func (s *Server) Unwatch(path string) error { delete(s.dbs, path) // Stop watching for changes on the database WAL. - if err := s.watcher.Unwatch(path + "-wal"); err != nil { + if err := s.watcher.Remove(path + "-wal"); err != nil { return fmt.Errorf("unwatch file: %w", err) } @@ -149,13 +149,26 @@ func (s *Server) Unwatch(path string) error { return nil } +func (s *Server) isWatched(event fsnotify.Event) bool { + path := event.Name + path = strings.TrimSuffix(path, "-wal") + + if _, ok := s.dbs[path]; ok { + return true + } + return false +} + // monitor runs in a separate goroutine and dispatches notifications to managed DBs. func (s *Server) monitor(ctx context.Context) error { for { select { case <-ctx.Done(): return ctx.Err() - case event := <-s.watcher.Events(): + case event := <-s.watcher.Events: + if !s.isWatched(event) { + continue + } if err := s.dispatchFileEvent(ctx, event); err != nil { return err } @@ -164,7 +177,7 @@ func (s *Server) monitor(ctx context.Context) error { } // dispatchFileEvent dispatches a notification to the database which owns the file. -func (s *Server) dispatchFileEvent(ctx context.Context, event internal.FileEvent) error { +func (s *Server) dispatchFileEvent(ctx context.Context, event fsnotify.Event) error { path := event.Name path = strings.TrimSuffix(path, "-wal") From 7d0167f10a69e74740e3992b15f1abe49cad13bb Mon Sep 17 00:00:00 2001 From: Yasuhiro Matsumoto Date: Sat, 7 May 2022 00:59:30 +0900 Subject: [PATCH 88/95] Unwatch directory --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index f1fe7d90..e487b55b 100644 --- a/server.go +++ b/server.go @@ -137,7 +137,7 @@ func (s *Server) Unwatch(path string) error { delete(s.dbs, path) // Stop watching for changes on the database WAL. 
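// A minimal, self-contained sketch, not part of this patch series, of the
// fsnotify pattern Server now relies on: watch the database's parent
// directory (fsnotify can only watch paths that already exist, and the -wal
// file may not exist yet) and filter events back to the replicated paths by
// trimming the "-wal" suffix, as dispatchFileEvent does above. The path and
// logging are illustrative only.
package main

import (
	"log"
	"path/filepath"
	"strings"

	"github.com/fsnotify/fsnotify"
)

func main() {
	dbPath := "/var/lib/app/fruits.db" // hypothetical database path

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the directory rather than the individual db/-wal files.
	if err := watcher.Add(filepath.Dir(dbPath)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			if strings.TrimSuffix(event.Name, "-wal") != dbPath {
				continue // event for a file we do not replicate
			}
			if event.Op&(fsnotify.Create|fsnotify.Write) != 0 {
				log.Printf("sync %s (%s)", dbPath, event.Op)
			}
		case err := <-watcher.Errors:
			log.Printf("watch error: %s", err)
		}
	}
}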
- if err := s.watcher.Remove(path + "-wal"); err != nil { + if err := s.watcher.Remove(filepath.Dir(path)); err != nil { return fmt.Errorf("unwatch file: %w", err) } From e9dbf83a45bef3f3dd05638789d81e8cba8815a9 Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Thu, 12 May 2022 13:18:55 -0600 Subject: [PATCH 89/95] Re-add Fileinfo() --- internal/internal.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/internal.go b/internal/internal.go index e4da7f8e..95d0f789 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -176,6 +176,15 @@ func MkdirAll(path string, mode os.FileMode, uid, gid int) error { return nil } +// Fileinfo returns syscall fields from a FileInfo object. +func Fileinfo(fi os.FileInfo) (uid, gid int) { + if fi == nil { + return -1, -1 + } + stat := fi.Sys().(*syscall.Stat_t) + return int(stat.Uid), int(stat.Gid) +} + // ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. func ParseSnapshotPath(s string) (index int, err error) { a := snapshotPathRegex.FindStringSubmatch(s) From 4522c7bce5a7a05505816b444132e4cf3485944d Mon Sep 17 00:00:00 2001 From: Yasuhiro Matsumoto Date: Fri, 13 May 2022 09:51:46 +0900 Subject: [PATCH 90/95] implement Fileinfo for Windows and non-Windows --- internal/internal.go | 9 --------- internal/internal_unix.go | 17 +++++++++++++++++ internal/internal_windows.go | 12 ++++++++++++ 3 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 internal/internal_unix.go create mode 100644 internal/internal_windows.go diff --git a/internal/internal.go b/internal/internal.go index 95d0f789..e4da7f8e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -176,15 +176,6 @@ func MkdirAll(path string, mode os.FileMode, uid, gid int) error { return nil } -// Fileinfo returns syscall fields from a FileInfo object. -func Fileinfo(fi os.FileInfo) (uid, gid int) { - if fi == nil { - return -1, -1 - } - stat := fi.Sys().(*syscall.Stat_t) - return int(stat.Uid), int(stat.Gid) -} - // ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. func ParseSnapshotPath(s string) (index int, err error) { a := snapshotPathRegex.FindStringSubmatch(s) diff --git a/internal/internal_unix.go b/internal/internal_unix.go new file mode 100644 index 00000000..7c4869c3 --- /dev/null +++ b/internal/internal_unix.go @@ -0,0 +1,17 @@ +//go:build !windows + +package internal + +import ( + "os" + "syscall" +) + +// Fileinfo returns syscall fields from a FileInfo object. +func Fileinfo(fi os.FileInfo) (uid, gid int) { + if fi == nil { + return -1, -1 + } + stat := fi.Sys().(*syscall.Stat_t) + return int(stat.Uid), int(stat.Gid) +} diff --git a/internal/internal_windows.go b/internal/internal_windows.go new file mode 100644 index 00000000..5b741594 --- /dev/null +++ b/internal/internal_windows.go @@ -0,0 +1,12 @@ +//go:build windows + +package internal + +import ( + "os" +) + +// Fileinfo returns syscall fields from a FileInfo object. 
+func Fileinfo(fi os.FileInfo) (uid, gid int) { + return -1, -1 +} From 31aa5b34f6f9837149061dd9d9e2c51857148273 Mon Sep 17 00:00:00 2001 From: Yasuhiro Matsumoto Date: Fri, 13 May 2022 09:53:30 +0900 Subject: [PATCH 91/95] Fix build tag --- internal/internal_unix.go | 1 + internal/internal_windows.go | 1 + 2 files changed, 2 insertions(+) diff --git a/internal/internal_unix.go b/internal/internal_unix.go index 7c4869c3..4de6d15d 100644 --- a/internal/internal_unix.go +++ b/internal/internal_unix.go @@ -1,4 +1,5 @@ //go:build !windows +// +build !windows package internal diff --git a/internal/internal_windows.go b/internal/internal_windows.go index 5b741594..c9810326 100644 --- a/internal/internal_windows.go +++ b/internal/internal_windows.go @@ -1,4 +1,5 @@ //go:build windows +// +build windows package internal From 2acdab02c87056e9028c04292b5410c2c314e720 Mon Sep 17 00:00:00 2001 From: Ryan Russell Date: Mon, 30 May 2022 09:19:02 -0500 Subject: [PATCH 92/95] Improve readability Signed-off-by: Ryan Russell --- replica.go | 2 +- wal_downloader.go | 4 ++-- wal_downloader_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/replica.go b/replica.go index 6541fea2..245565fe 100644 --- a/replica.go +++ b/replica.go @@ -105,7 +105,7 @@ func (r *Replica) Client() ReplicaClient { return r.client } // Starts replicating in a background goroutine. func (r *Replica) Start(ctx context.Context) { - // Ignore if replica is being used sychronously. + // Ignore if replica is being used synchronously. if !r.MonitorEnabled { return } diff --git a/wal_downloader.go b/wal_downloader.go index c4903844..78ee2400 100644 --- a/wal_downloader.go +++ b/wal_downloader.go @@ -15,12 +15,12 @@ import ( // WALDownloader represents a parallel downloader of WAL files from a replica client. // // It works on a per-index level so WAL files are always downloaded in their -// entirety and are not segmented. WAL files are downloaded from minIndex to +// entiretry and are not segmented. WAL files are downloaded from minIndex to // maxIndex, inclusively, and are written to a path prefix. WAL files are named // with the prefix and suffixed with the WAL index. It is the responsibility of // the caller to clean up these WAL files. // -// The purpose of the parallization is that RTT & WAL apply time can consume +// The purpose of the parallelization is that RTT & WAL apply time can consume // much of the restore time so it's useful to download multiple WAL files in // the background to minimize the latency. While some WAL indexes may be // downloaded out of order, the WALDownloader ensures that Next() always diff --git a/wal_downloader_test.go b/wal_downloader_test.go index f43ff177..f467a2e6 100644 --- a/wal_downloader_test.go +++ b/wal_downloader_test.go @@ -383,7 +383,7 @@ func testWALDownloader(t *testing.T, parallelism int) { } }) - // Ensure a gap in indicies returns an error. + // Ensure a gap in indices returns an error. 
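// A sketch, not part of this patch series and not Litestream's actual
// WALDownloader, of the re-sequencing idea described in the wal_downloader.go
// comment above: several goroutines fetch WAL indexes concurrently, but
// results are handed out strictly in index order. Types and the fetch
// function are hypothetical.
package main

import (
	"fmt"
	"sync"
)

// fetch is a stand-in for downloading one WAL index from a replica.
func fetch(index int) string {
	return fmt.Sprintf("wal-%08d", index)
}

// downloadInOrder fetches indexes [min,max] with the given parallelism and
// returns them ordered by index, mimicking how Next() can always yield the
// next sequential index even when downloads complete out of order.
func downloadInOrder(min, max, parallelism int) []string {
	type result struct {
		index int
		data  string
	}
	indexes := make(chan int)
	results := make(chan result)

	var wg sync.WaitGroup
	for i := 0; i < parallelism; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for idx := range indexes {
				results <- result{index: idx, data: fetch(idx)}
			}
		}()
	}
	go func() {
		for idx := min; idx <= max; idx++ {
			indexes <- idx
		}
		close(indexes)
	}()
	go func() { wg.Wait(); close(results) }()

	// Buffer out-of-order completions until the next sequential index arrives.
	pending := make(map[int]string)
	ordered := make([]string, 0, max-min+1)
	next := min
	for r := range results {
		pending[r.index] = r.data
		for data, ok := pending[next]; ok; data, ok = pending[next] {
			ordered = append(ordered, data)
			delete(pending, next)
			next++
		}
	}
	return ordered
}

func main() {
	fmt.Println(downloadInOrder(0, 4, 3)) // always printed in index order
}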
t.Run("ErrMissingMiddleIndex", func(t *testing.T) { testDir := filepath.Join("testdata", "wal-downloader", "missing-middle-index") tempDir := t.TempDir() From 80cd049ae757140b4693d6d5c9c71fce0344f697 Mon Sep 17 00:00:00 2001 From: Ryan Russell Date: Fri, 3 Jun 2022 16:01:59 -0500 Subject: [PATCH 93/95] Revert to correct `wal_downloader.go` Signed-off-by: Ryan Russell --- wal_downloader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wal_downloader.go b/wal_downloader.go index 78ee2400..d051e170 100644 --- a/wal_downloader.go +++ b/wal_downloader.go @@ -15,7 +15,7 @@ import ( // WALDownloader represents a parallel downloader of WAL files from a replica client. // // It works on a per-index level so WAL files are always downloaded in their -// entiretry and are not segmented. WAL files are downloaded from minIndex to +// entirety and are not segmented. WAL files are downloaded from minIndex to // maxIndex, inclusively, and are written to a path prefix. WAL files are named // with the prefix and suffixed with the WAL index. It is the responsibility of // the caller to clean up these WAL files. From 9f9f4c0be7662610c6e72d507d555ff780e247e9 Mon Sep 17 00:00:00 2001 From: Niels Hofmans Date: Thu, 30 Jun 2022 14:04:57 +0200 Subject: [PATCH 94/95] feat(docker): harden docker image --- Dockerfile | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index c0dd0cc0..67afb3ed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,13 @@ -FROM golang:1.17 as builder +FROM golang:1.18 as builder + +# add ca certificates and timezone data files +# hadolint ignore=DL3018 +RUN apk add -U --no-cache ca-certificates tzdata + +# add unprivileged user +RUN adduser -s /bin/true -u 1000 -D -h /app app \ + && sed -i -r "/^(app|root)/!d" /etc/group /etc/passwd \ + && sed -i -r 's#^(.*):[^:]*$#\1:/sbin/nologin#' /etc/passwd WORKDIR /src/litestream COPY . . 
@@ -9,8 +18,27 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg \ go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream +# +# --- +# + +# start with empty image +FROM scratch + +# add-in our timezone data file +COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo + +# add-in our ca certificates +COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ + +# add-in our unprivileged user +COPY --from=builder /etc/passwd /etc/group /etc/shadow /etc/ + +# from now on, run as the unprivileged user +USER app + +# copy in our litestream binary +COPY --from=builder /usr/local/bin/litestream /litestream -FROM alpine -COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream -ENTRYPOINT ["/usr/local/bin/litestream"] +ENTRYPOINT ["/litestream"] CMD [] From 5124cc461c367997ed992a43ed192453860b23c8 Mon Sep 17 00:00:00 2001 From: Niels Hofmans Date: Thu, 30 Jun 2022 14:11:21 +0200 Subject: [PATCH 95/95] chore: fix build --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 67afb3ed..e51a4bcf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -FROM golang:1.18 as builder +FROM golang:1.18-alpine as builder # add ca certificates and timezone data files # hadolint ignore=DL3018 -RUN apk add -U --no-cache ca-certificates tzdata +RUN apk add -U --no-cache ca-certificates tzdata gcc musl-dev # add unprivileged user RUN adduser -s /bin/true -u 1000 -D -h /app app \