diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 917b04a7..b1cf4525 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1,17 +1,18 @@ -## Open-source, not open-contribution +## Contribution Policy -[Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open -source but closed to contributions. This keeps the code base free of proprietary -or licensed code but it also helps me continue to maintain and build Litestream. +Initially, Litestream was closed to outside contributions. The goal was to +reduce burnout by limiting the maintenance overhead of reviewing and validating +third-party code. However, this policy is overly broad and has prevented small, +easily testable patches from being contributed. -As the author of [BoltDB](https://github.com/boltdb/bolt), I found that -accepting and maintaining third party patches contributed to my burn out and -I eventually archived the project. Writing databases & low-level replication -tools involves nuance and simple one line changes can have profound and -unexpected changes in correctness and performance. Small contributions -typically required hours of my time to properly test and validate them. +Litestream is now open to code contributions for bug fixes only. Features carry +a long-term maintenance burden so they will not be accepted at this time. +Please [submit an issue][new-issue] if you have a feature you'd like to +request. + +If you find mistakes in the documentation, please submit a fix to the +[documentation repository][docs]. + +[new-issue]: https://github.com/benbjohnson/litestream/issues/new +[docs]: https://github.com/benbjohnson/litestream.io -I am grateful for community involvement, bug reports, & feature requests. I do -not wish to come off as anything but welcoming, however, I've -made the decision to keep this project closed to contributions for my own -mental health and long term viability of the project. 
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index fe28e01f..00000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,7 +0,0 @@ -Litestream is not accepting code contributions at this time. You can find a summary of why on the project's GitHub README: - -https://github.com/benbjohnson/litestream#open-source-not-open-contribution - -Web site & Documentation changes, however, are welcome. You can find that repository here: - -https://github.com/benbjohnson/litestream.io diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml new file mode 100644 index 00000000..d4e3b5e2 --- /dev/null +++ b/.github/workflows/build_and_test.yml @@ -0,0 +1,30 @@ +name: "Build and Unit Test" +on: pull_request + +jobs: + build: + name: Build + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.19' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Build binary + run: go install ./cmd/litestream + + - name: Run unit tests + run: make testdata && go test -v --coverprofile=.coverage.out ./... 
&& go tool cover -html .coverage.out -o .coverage.html + + - uses: actions/upload-artifact@v3 + with: + name: code-coverage + path: .coverage.html diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..d06b47f6 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,38 @@ +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + schedule: + - cron: '20 16 * * 4' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 00000000..e26eb482 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,18 @@ +name: golangci-lint +on: + pull_request: + +permissions: + contents: read + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - uses: golangci/golangci-lint-action@v2 + with: + version: latest + args: --timeout=10m diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml new file mode 100644 index 00000000..29293cf8 --- /dev/null +++ b/.github/workflows/integration_test.yml @@ -0,0 +1,138 @@ +name: Integration Tests +on: + pull_request: + branches-ignore: + - "dependabot/**" + +jobs: + s3-integration-test: + name: Run S3 Integration Tests + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.19' + + - uses: actions/cache@v2 + 
with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestReplicaClient ./integration -replica-type s3 + env: + LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} + LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} + LITESTREAM_S3_REGION: us-east-1 + LITESTREAM_S3_BUCKET: integration.litestream.io + + gcp-integration-test: + name: Run GCP Integration Tests + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.19' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Extract GCP credentials + run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' + shell: bash + env: + GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} + + - run: go test -v -run=TestReplicaClient ./integration -replica-type gs + env: + GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json + LITESTREAM_GS_BUCKET: integration.litestream.io + + abs-integration-test: + name: Run Azure Blob Store Integration Tests + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.19' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go test -v -run=TestReplicaClient ./integration -replica-type abs + env: + LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} + LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} + LITESTREAM_ABS_BUCKET: integration + + sftp-integration-test: + name: Run SFTP Integration Tests + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: 
'1.19' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - name: Extract SSH key + run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' + shell: bash + env: + LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} + + - name: Run sftp tests w/ key + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp + env: + LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 + LITESTREAM_SFTP_USER: litestream + LITESTREAM_SFTP_PATH: /litestream + LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 + + - name: Run sftp tests w/ password + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp + env: + LITESTREAM_SFTP_HOST: litestream-test-sftp.fly.dev:2222 + LITESTREAM_SFTP_USER: litestream + LITESTREAM_SFTP_PASSWORD: ${{ secrets.LITESTREAM_SFTP_PASSWORD }} + LITESTREAM_SFTP_PATH: /litestream + + long-running-test: + name: Run Long-Running Test + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-go@v2 + with: + go-version: '1.19' + + - uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ inputs.os }}-go- + + - run: go install ./cmd/litestream + + - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m diff --git a/.github/workflows/release.docker.yml b/.github/workflows/release.docker.yml new file mode 100644 index 00000000..5b56f540 --- /dev/null +++ b/.github/workflows/release.docker.yml @@ -0,0 +1,51 @@ +on: + release: + types: + - published + pull_request: + types: + - opened + - synchronize + - reopened + branches-ignore: + - "dependabot/**" + +name: Release (Docker) +jobs: + docker: + runs-on: ubuntu-latest + env: + PLATFORMS: "${{ github.event_name == 'release' && 'linux/amd64,linux/arm64,linux/arm/v7' || 'linux/amd64' }}" + VERSION: "${{ github.event_name == 'release' && 
github.event.release.name || github.sha }}" + + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-buildx-action@v1 + + - uses: docker/login-action@v1 + with: + username: benbjohnson + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - id: meta + uses: docker/metadata-action@v3 + with: + images: litestream/litestream + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha + type=sha,format=long + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - uses: docker/build-push-action@v2 + with: + context: . + push: true + platforms: ${{ env.PLATFORMS }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + LITESTREAM_VERSION=${{ env.VERSION }} diff --git a/.github/workflows/release.linux.yml b/.github/workflows/release.linux.yml index ce80a956..ac185b93 100644 --- a/.github/workflows/release.linux.yml +++ b/.github/workflows/release.linux.yml @@ -1,42 +1,71 @@ on: release: types: - - created + - published + pull_request: + types: + - opened + - synchronize + - reopened + branches-ignore: + - "dependabot/**" -name: release (linux) +name: Release (Linux) jobs: build: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 strategy: matrix: include: - - arch: amd64 - cc: gcc + - arch: amd64 + cc: gcc + + - arch: amd64 + cc: gcc + static: true + + - arch: arm64 + cc: aarch64-linux-gnu-gcc + - arch: arm64 cc: aarch64-linux-gnu-gcc + static: true + + - arch: arm + arm: 6 + cc: arm-linux-gnueabi-gcc + - arch: arm arm: 6 cc: arm-linux-gnueabi-gcc + static: true + + - arch: arm + arm: 7 + cc: arm-linux-gnueabihf-gcc + - arch: arm arm: 7 cc: arm-linux-gnueabihf-gcc + static: true env: GOOS: linux GOARCH: ${{ matrix.arch }} GOARM: ${{ matrix.arm }} CC: ${{ matrix.cc }} + LDFLAGS: ${{ matrix.static && '-extldflags "-static"' || '' }} + TAGS: ${{ matrix.static && 'osusergo,netgo,sqlite_omit_load_extension' || '' }} + SUFFIX: "${{ matrix.static && '-static' || ''}}" + 
VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" + steps: - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 with: - go-version: '1.16' - - - id: release - uses: bruceadams/get-release@v1.2.2 - env: - GITHUB_TOKEN: ${{ github.token }} + go-version: '1.19' - name: Install cross-compilers run: | @@ -50,32 +79,56 @@ jobs: - name: Build litestream run: | - rm -rf dist - mkdir -p dist + rm -rf dist && mkdir -p dist + cp etc/litestream.yml etc/litestream.service dist - cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml - CGO_ENABLED=1 go build -ldflags "-s -w -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream - + cat etc/nfpm.yml | LITESTREAM_VERSION=${{ env.VERSION }} envsubst > dist/nfpm.yml + + CGO_ENABLED=1 go build -ldflags "-s -w ${{ env.LDFLAGS }} -X 'main.Version=${{ env.VERSION }}'" -tags "${{ env.TAGS }}" -o dist/litestream ./cmd/litestream + cd dist - tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream - ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb + tar -czvf litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz litestream + ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + + - name: Upload binary artifact + uses: actions/upload-artifact@v2 + with: + name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + path: dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + if-no-files-found: error + + - name: Upload debian artifact + uses: actions/upload-artifact@v2 + with: + 
name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + path: dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + if-no-files-found: error + + - name: Get release + id: release + uses: bruceadams/get-release@v1.2.3 + if: github.event_name == 'release' + env: + GITHUB_TOKEN: ${{ github.token }} - name: Upload release tarball uses: actions/upload-release-asset@v1.0.2 + if: github.event_name == 'release' env: GITHUB_TOKEN: ${{ github.token }} with: upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz + asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz + asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.tar.gz asset_content_type: application/gzip - name: Upload debian package uses: actions/upload-release-asset@v1.0.2 + if: github.event_name == 'release' env: GITHUB_TOKEN: ${{ github.token }} with: upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb + asset_path: ./dist/litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb + asset_name: litestream-${{ env.VERSION }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}${{ env.SUFFIX }}.deb asset_content_type: application/octet-stream diff --git a/.github/workflows/release.linux_static.yml b/.github/workflows/release.linux_static.yml deleted file mode 100644 index 
ddc90b9a..00000000 --- a/.github/workflows/release.linux_static.yml +++ /dev/null @@ -1,62 +0,0 @@ -on: - release: - types: - - created - -name: release (linux/static) -jobs: - build: - runs-on: ubuntu-18.04 - strategy: - matrix: - include: - - arch: amd64 - cc: gcc - - arch: arm64 - cc: aarch64-linux-gnu-gcc - - arch: arm - arm: 6 - cc: arm-linux-gnueabi-gcc - - arch: arm - arm: 7 - cc: arm-linux-gnueabihf-gcc - - env: - GOOS: linux - GOARCH: ${{ matrix.arch }} - GOARM: ${{ matrix.arm }} - CC: ${{ matrix.cc }} - - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-go@v2 - with: - go-version: '1.16' - - - id: release - uses: bruceadams/get-release@v1.2.2 - env: - GITHUB_TOKEN: ${{ github.token }} - - - name: Install cross-compilers - run: | - sudo apt-get update - sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi - - - name: Build litestream - run: | - rm -rf dist - mkdir -p dist - CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream - cd dist - tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz litestream - - - name: Upload release tarball - uses: actions/upload-release-asset@v1.0.2 - env: - GITHUB_TOKEN: ${{ github.token }} - with: - upload_url: ${{ steps.release.outputs.upload_url }} - asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz - asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz - asset_content_type: application/gzip diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 4d3122b7..00000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,62 +0,0 @@ -on: push -name: test -jobs: - test: 
- runs-on: ubuntu-18.04 - steps: - - uses: actions/setup-go@v2 - with: - go-version: '1.16' - - - uses: actions/checkout@v2 - - - uses: actions/cache@v2 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - - name: Extract GCP credentials - run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json' - shell: bash - env: - GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - - name: Extract SSH key - run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519' - shell: bash - env: - LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} - - - name: Run unit tests - run: go test -v ./... - - - name: Run aws s3 tests - run: go test -v -run=TestReplicaClient . -integration s3 - env: - LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} - LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: ${{ secrets.LITESTREAM_S3_REGION }} - LITESTREAM_S3_BUCKET: ${{ secrets.LITESTREAM_S3_BUCKET }} - - - name: Run google cloud storage (gcs) tests - run: go test -v -run=TestReplicaClient . -integration gcs - env: - GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: ${{ secrets.LITESTREAM_GCS_BUCKET }} - - - name: Run azure blob storage (abs) tests - run: go test -v -run=TestReplicaClient . -integration abs - env: - LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} - LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: ${{ secrets.LITESTREAM_ABS_BUCKET }} - - - name: Run sftp tests - run: go test -v -run=TestReplicaClient . 
-integration sftp - env: - LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} - LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} - LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519 - LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }} diff --git a/.gitignore b/.gitignore index 6acf8819..7f08fdd2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ +.coverage.* .DS_Store /dist diff --git a/Dockerfile b/Dockerfile index 677f27ff..461fac88 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,15 @@ -FROM golang:1.16 as builder +FROM golang:1.19 as builder + WORKDIR /src/litestream COPY . . + ARG LITESTREAM_VERSION=latest + RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg \ go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream + FROM alpine COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream ENTRYPOINT ["/usr/local/bin/litestream"] diff --git a/Makefile b/Makefile index e3d75e4c..598eddd2 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,11 @@ +.PHONY: default default: +.PHONY: testdata +testdata: + make -C testdata + make -C cmd/litestream testdata + docker: docker build -t litestream . diff --git a/README.md b/README.md index 1ce84798..84d6e4e8 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Litestream ![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg) ========== -Litestream is a standalone streaming replication tool for SQLite. It runs as a +Litestream is a standalone disaster recovery tool for SQLite. It runs as a background process and safely replicates changes incrementally to another file or S3. Litestream only communicates with SQLite through the SQLite API so it will not corrupt your database. 
@@ -33,35 +33,29 @@ energy into the project to help make it better: - Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release. - Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation. -- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. Also, thanks to fly.io for providing testing resources. +- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. - Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it. - Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working. -- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleuu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. +- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. +Huge thanks to fly.io for their support and for contributing credits for testing and development! +## Contribution Policy -## Open-source, not open-contribution +Initially, Litestream was closed to outside contributions. 
The goal was to +reduce burnout by limiting the maintenance overhead of reviewing and validating +third-party code. However, this policy is overly broad and has prevented small, +easily testable patches from being contributed. -[Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open -source but closed to code contributions. This keeps the code base free of -proprietary or licensed code but it also helps me continue to maintain and build -Litestream. +Litestream is now open to code contributions for bug fixes only. Features carry +a long-term maintenance burden so they will not be accepted at this time. +Please [submit an issue][new-issue] if you have a feature you'd like to +request. -As the author of [BoltDB](https://github.com/boltdb/bolt), I found that -accepting and maintaining third party patches contributed to my burn out and -I eventually archived the project. Writing databases & low-level replication -tools involves nuance and simple one line changes can have profound and -unexpected changes in correctness and performance. Small contributions -typically required hours of my time to properly test and validate them. +If you find mistakes in the documentation, please submit a fix to the +[documentation repository][docs]. -I am grateful for community involvement, bug reports, & feature requests. I do -not wish to come off as anything but welcoming, however, I've -made the decision to keep this project closed to contributions for my own -mental health and long term viability of the project. - -The [documentation repository][docs] is MIT licensed and pull requests are welcome there. 
- -[releases]: https://github.com/benbjohnson/litestream/releases +[new-issue]: https://github.com/benbjohnson/litestream/issues/new [docs]: https://github.com/benbjohnson/litestream.io diff --git a/abs/replica_client.go b/abs/replica_client.go index 4d5e00ef..0c211bc1 100644 --- a/abs/replica_client.go +++ b/abs/replica_client.go @@ -2,16 +2,22 @@ package abs import ( "context" + "errors" "fmt" "io" - "net/url" "os" "path" "strings" "sync" "time" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/internal" "golang.org/x/sync/errgroup" @@ -24,8 +30,8 @@ var _ litestream.ReplicaClient = (*ReplicaClient)(nil) // ReplicaClient is a client for writing snapshots & WAL segments to disk. type ReplicaClient struct { - mu sync.Mutex - containerURL *azblob.ContainerURL + mu sync.Mutex + client *container.Client // Azure credentials AccountName string @@ -52,7 +58,7 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) { c.mu.Lock() defer c.mu.Unlock() - if c.containerURL != nil { + if c.client != nil { return nil } @@ -62,10 +68,10 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) { accountKey = os.Getenv("LITESTREAM_AZURE_ACCOUNT_KEY") } - // Authenticate to ACS. - credential, err := azblob.NewSharedKeyCredential(c.AccountName, accountKey) + // Authenticate to Azure Blob Storage using shared key credentials. 
+ credentials, err := azblob.NewSharedKeyCredential(c.AccountName, accountKey) if err != nil { - return err + return fmt.Errorf("cannot create Shared Key Credential object: %w", err) } // Construct & parse endpoint unless already set. @@ -73,19 +79,25 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) { if endpoint == "" { endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", c.AccountName) } - endpointURL, err := url.Parse(endpoint) - if err != nil { - return fmt.Errorf("cannot parse azure endpoint: %w", err) + if !strings.HasSuffix(endpoint, "/") { + endpoint += "/" } + endpoint += c.Bucket // Build pipeline and reference to container. - pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{ - Retry: azblob.RetryOptions{ - TryTimeout: 24 * time.Hour, + c.client, err = container.NewClientWithSharedKeyCredential(endpoint, credentials, &container.ClientOptions{ + ClientOptions: policy.ClientOptions{ + Retry: policy.RetryOptions{ + TryTimeout: 24 * time.Hour, + }, + Telemetry: policy.TelemetryOptions{ + ApplicationID: "litestream", + }, }, }) - containerURL := azblob.NewServiceURL(*endpointURL, pipeline).NewContainerURL(c.Bucket) - c.containerURL = &containerURL + if err != nil { + return fmt.Errorf("cannot create Azure Blob Storage client: %w", err) + } return nil } @@ -97,20 +109,20 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { } var generations []string - var marker azblob.Marker - for marker.NotDone() { - internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - - resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{ - Prefix: litestream.GenerationsPath(c.Path) + "/", - }) + pager := c.client.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ + Prefix: to.Ptr(c.Path + "/generations/"), + }) + for pager.More() { + resp, err := pager.NextPage(ctx) if err != nil { return nil, err } - marker = resp.NextMarker 
for _, prefix := range resp.Segment.BlobPrefixes { - name := path.Base(strings.TrimSuffix(prefix.Name, "/")) + if prefix == nil || prefix.Name == nil { + continue + } + name := path.Base(strings.TrimSuffix(*prefix.Name, "/")) if !litestream.IsGenerationName(name) { continue } @@ -125,30 +137,32 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return errors.New("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } - - var marker azblob.Marker - for marker.NotDone() { + pager := c.client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: to.Ptr(c.Path + "/generations/" + generation + "/"), + }) + for pager.More() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := pager.NextPage(ctx) if err != nil { return err } - marker = resp.NextMarker for _, item := range resp.Segment.BlobItems { + if item == nil || item.Name == nil { + continue + } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() - blobURL := c.containerURL.NewBlobURL(item.Name) - if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) { - continue - } else if err != nil { + _, err = c.client.NewBlobClient(*item.Name).Delete(ctx, nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + continue + } return err } } @@ -171,29 +185,27 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd 
io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) - blobURL := c.containerURL.NewBlockBlobURL(key) - if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{ - BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"}, - BlobAccessTier: azblob.DefaultAccessTier, - }); err != nil { + _, err = c.client.NewBlockBlobClient(key).UploadStream(ctx, rc, &blockblob.UploadStreamOptions{ + HTTPHeaders: &blob.HTTPHeaders{ + BlobContentType: to.Ptr("application/octet-stream"), + }, + }) + if err != nil { return info, err } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N())) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, @@ -206,46 +218,46 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, 
"generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") - blobURL := c.containerURL.NewBlobURL(key) - resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) - if isNotExists(err) { - return nil, os.ErrNotExist - } else if err != nil { + resp, err := c.client.NewBlobClient(key).DownloadStream(ctx, nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return nil, os.ErrNotExist + } return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err) } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc() - internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength())) + internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*resp.ContentLength)) - return resp.Body(azblob.RetryReaderOptions{}), nil + return resp.NewRetryReader(ctx, nil), nil } // DeleteSnapshot deletes a snapshot with the given generation & index. 
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() - blobURL := c.containerURL.NewBlobURL(key) - if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) { - return nil - } else if err != nil { + _, err := c.client.NewBlobClient(key).Delete(ctx, nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return nil + } return fmt.Errorf("cannot delete snapshot %q: %w", key, err) } + return nil } @@ -261,21 +273,21 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) - blobURL := c.containerURL.NewBlockBlobURL(key) - if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{ - BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: 
"application/octet-stream"}, - BlobAccessTier: azblob.DefaultAccessTier, - }); err != nil { + _, err = c.client.NewBlockBlobClient(key).UploadStream(ctx, rc, &blockblob.UploadStreamOptions{ + HTTPHeaders: &blob.HTTPHeaders{ + BlobContentType: to.Ptr("application/octet-stream"), + }, + }) + if err != nil { return info, err } @@ -296,45 +308,47 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") - blobURL := c.containerURL.NewBlobURL(key) - resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) - if isNotExists(err) { - return nil, os.ErrNotExist - } else if err != nil { + resp, err := c.client.NewBlobClient(key).DownloadStream(ctx, nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return nil, os.ErrNotExist + } return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err) } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc() - internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength())) + internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*resp.ContentLength)) - return resp.Body(azblob.RetryReaderOptions{}), nil + return resp.NewRetryReader(ctx, nil), nil } // DeleteWALSegments deletes WAL segments with at the given positions. 
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error { - if err := c.Init(ctx); err != nil { + var err error + if err = c.Init(ctx); err != nil { return err } for _, pos := range a { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") + internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() - blobURL := c.containerURL.NewBlobURL(key) - if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) { - continue - } else if err != nil { + _, err := c.client.NewBlobClient(key).Delete(ctx, nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + continue + } return fmt.Errorf("cannot delete wal segment %q: %w", key, err) } } @@ -372,24 +386,27 @@ func newSnapshotIterator(ctx context.Context, generation string, client *Replica func (itr *snapshotIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine snapshots path: %w", err) + if itr.generation == "" { + return errors.New("generation required") } - var marker azblob.Marker - for marker.NotDone() { + pager := itr.client.client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: to.Ptr(itr.client.Path + "/generations/" + itr.generation + "/"), + }) + for pager.More() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := pager.NextPage(itr.ctx) if err 
!= nil { return err } - marker = resp.NextMarker for _, item := range resp.Segment.BlobItems { - key := path.Base(item.Name) - index, err := litestream.ParseSnapshotPath(key) + if item == nil || item.Name == nil { + continue + } + + index, err := internal.ParseSnapshotPath(path.Base(*item.Name)) if err != nil { continue } @@ -407,6 +424,7 @@ func (itr *snapshotIterator) fetch() error { } } } + return nil } @@ -478,24 +496,29 @@ func newWALSegmentIterator(ctx context.Context, generation string, client *Repli func (itr *walSegmentIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.WALPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine wal path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal") - var marker azblob.Marker - for marker.NotDone() { + pager := itr.client.client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: to.Ptr(prefix), + }) + for pager.More() { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) + resp, err := pager.NextPage(itr.ctx) if err != nil { return err } - marker = resp.NextMarker for _, item := range resp.Segment.BlobItems { - key := path.Base(item.Name) - index, offset, err := litestream.ParseWALSegmentPath(key) + if item == nil || item.Name == nil { + continue + } + + key := strings.TrimPrefix(*item.Name, prefix+"/") + index, offset, err := internal.ParseWALSegmentPath(key) if err != nil { continue } @@ -514,6 +537,7 @@ func (itr *walSegmentIterator) fetch() error { } } } + return nil } @@ -554,12 +578,3 @@ func (itr *walSegmentIterator) Err() error { return itr.err } func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo { return itr.info } - -func isNotExists(err error) bool { - 
switch err := err.(type) { - case azblob.StorageError: - return err.ServiceCode() == azblob.ServiceCodeBlobNotFound - default: - return false - } -} diff --git a/cmd/litestream/Makefile b/cmd/litestream/Makefile new file mode 100644 index 00000000..40738583 --- /dev/null +++ b/cmd/litestream/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + +.PHONY: testdata +testdata: + make -C testdata diff --git a/cmd/litestream/databases.go b/cmd/litestream/databases.go index 236c01eb..a9e99aef 100644 --- a/cmd/litestream/databases.go +++ b/cmd/litestream/databases.go @@ -4,18 +4,34 @@ import ( "context" "flag" "fmt" - "os" + "io" "strings" "text/tabwriter" ) // DatabasesCommand is a command for listing managed databases. -type DatabasesCommand struct{} +type DatabasesCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + configPath string + noExpandEnv bool +} + +// NewDatabasesCommand returns a new instance of DatabasesCommand. +func NewDatabasesCommand(stdin io.Reader, stdout, stderr io.Writer) *DatabasesCommand { + return &DatabasesCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -24,16 +40,16 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { } // Load configuration. - if *configPath == "" { - *configPath = DefaultConfigPath() - } - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err + } else if len(config.DBs) == 0 { + fmt.Fprintln(c.stdout, "No databases found in config file.") + return nil } // List all databases. 
- w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "path\treplicas") @@ -59,7 +75,7 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { // Usage prints the help screen to STDOUT. func (c *DatabasesCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The databases command lists all databases in the configuration file. Usage: diff --git a/cmd/litestream/databases_test.go b/cmd/litestream/databases_test.go new file mode 100644 index 00000000..25aef5ed --- /dev/null +++ b/cmd/litestream/databases_test.go @@ -0,0 +1,66 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestDatabasesCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "ok") + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("NoDatabases", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "no-databases") + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrConfigNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "no-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "-config", 
filepath.Join(testDir, "litestream.yml")}) + if err == nil || !strings.Contains(err.Error(), `config file not found:`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "databases", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "-config", filepath.Join(testDir, "litestream.yml")}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"databases", "xyz"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"databases", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/generations.go b/cmd/litestream/generations.go index fefa40c6..5d237a2e 100644 --- a/cmd/litestream/generations.go +++ b/cmd/litestream/generations.go @@ -4,117 +4,116 @@ import ( "context" "flag" "fmt" - "log" + "io" "os" "text/tabwriter" "time" "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/internal" ) // GenerationsCommand represents a command to list all generations for a database. -type GenerationsCommand struct{} +type GenerationsCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + configPath string + noExpandEnv bool + + replicaName string +} + +// NewGenerationsCommand returns a new instance of GenerationsCommand. +func NewGenerationsCommand(stdin io.Reader, stdout, stderr io.Writer) *GenerationsCommand { + return &GenerationsCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. 
-func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) { +func (c *GenerationsCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - replicaName := fs.String("replica", "", "replica name") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.replicaName, "replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err - } else if fs.NArg() == 0 || fs.Arg(0) == "" { + } else if fs.Arg(0) == "" { return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - dbUpdatedAt := time.Now() - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if *configPath == "" { - *configPath = DefaultConfigPath() - } - - // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) - if err != nil { - return err - } - - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err + } - // Filter by replica, if specified. 
- if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } - } + replicas, db, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err + } - // Determine last time database or WAL was updated. - if dbUpdatedAt, err = db.UpdatedAt(); err != nil { + // Determine last time database or WAL was updated. + var dbUpdatedAt time.Time + if db != nil { + if dbUpdatedAt, err = db.UpdatedAt(); err != nil && !os.IsNotExist(err) { return err } } - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas - } - // List each generation. - w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend") + for _, r := range replicas { - generations, err := r.Client.Generations(ctx) + generations, err := r.Client().Generations(ctx) if err != nil { - log.Printf("%s: cannot list generations: %s", r.Name(), err) + fmt.Fprintf(c.stderr, "%s: cannot list generations: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } // Iterate over each generation for the replica. for _, generation := range generations { - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) + createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client(), generation) if err != nil { - log.Printf("%s: cannot determine generation time bounds: %s", r.Name(), err) + fmt.Fprintf(c.stderr, "%s: cannot determine generation time bounds: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } + // Calculate lag from database mod time to the replica mod time. + // This is ignored if the database mod time is unavailable such as + // when specifying the replica URL or if the database file is missing. 
+ lag := "-" + if !dbUpdatedAt.IsZero() { + lag = internal.TruncateDuration(dbUpdatedAt.Sub(updatedAt)).String() + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", r.Name(), generation, - truncateDuration(dbUpdatedAt.Sub(updatedAt)).String(), + lag, createdAt.Format(time.RFC3339), updatedAt.Format(time.RFC3339), ) } } - return nil + return ret } // Usage prints the help message to STDOUT. func (c *GenerationsCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The generations command lists all generations for a database or replica. It also lists stats about their lag behind the primary database and the time range they cover. @@ -141,29 +140,3 @@ Arguments: DefaultConfigPath(), ) } - -func truncateDuration(d time.Duration) time.Duration { - if d < 0 { - if d < -10*time.Second { - return d.Truncate(time.Second) - } else if d < -time.Second { - return d.Truncate(time.Second / 10) - } else if d < -time.Millisecond { - return d.Truncate(time.Millisecond) - } else if d < -time.Microsecond { - return d.Truncate(time.Microsecond) - } - return d - } - - if d > 10*time.Second { - return d.Truncate(time.Second) - } else if d > time.Second { - return d.Truncate(time.Second / 10) - } else if d > time.Millisecond { - return d.Truncate(time.Millisecond) - } else if d > time.Microsecond { - return d.Truncate(time.Microsecond) - } - return d -} diff --git a/cmd/litestream/generations_test.go b/cmd/litestream/generations_test.go new file mode 100644 index 00000000..1da23e43 --- /dev/null +++ b/cmd/litestream/generations_test.go @@ -0,0 +1,140 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestGenerationsCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), 
[]string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "generations", "replica-url") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("NoDatabase", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "no-database") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + 
t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "generations", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not 
found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "generations", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"generations", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"generations", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index d186f61f..73b9fd85 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -5,22 +5,24 @@ import ( "errors" "flag" "fmt" + "io" "io/ioutil" "log" "net/url" "os" + "os/signal" "os/user" "path" "path/filepath" "regexp" "strconv" "strings" + "syscall" "time" "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" _ "github.com/mattn/go-sqlite3" @@ -32,14 +34,15 @@ var ( Version = "(development build)" ) -// errStop is a terminal error for indicating program should quit. 
-var errStop = errors.New("stop") +// errExit is a terminal error for indicating program should quit. +var errExit = errors.New("exit") func main() { log.SetFlags(0) + log.SetOutput(os.Stdout) - m := NewMain() - if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop { + m := NewMain(os.Stdin, os.Stdout, os.Stderr) + if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errExit { os.Exit(1) } else if err != nil { log.Println(err) @@ -48,22 +51,23 @@ func main() { } // Main represents the main program execution. -type Main struct{} +type Main struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer +} // NewMain returns a new instance of Main. -func NewMain() *Main { - return &Main{} +func NewMain(stdin io.Reader, stdout, stderr io.Writer) *Main { + return &Main{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } } // Run executes the program. func (m *Main) Run(ctx context.Context, args []string) (err error) { - // Execute replication command if running as a Windows service. - if isService, err := isWindowsService(); err != nil { - return err - } else if isService { - return runWindowsService(ctx) - } - // Copy "LITESTEAM" environment credentials. applyLitestreamEnv() @@ -75,18 +79,20 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { switch cmd { case "databases": - return (&DatabasesCommand{}).Run(ctx, args) + return NewDatabasesCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args) case "generations": - return (&GenerationsCommand{}).Run(ctx, args) + return NewGenerationsCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args) case "replicate": - c := NewReplicateCommand() + c := NewReplicateCommand(m.stdin, m.stdout, m.stderr) if err := c.ParseFlags(ctx, args); err != nil { return err } // Setup signal handler. 
ctx, cancel := context.WithCancel(ctx) - signalCh := signalChan() + defer cancel() + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) if err := c.Run(ctx); err != nil { return err @@ -94,20 +100,22 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { // Wait for signal to stop program. select { + case <-ctx.Done(): + fmt.Fprintln(m.stdout, "context done, litestream shutting down") case err = <-c.execCh: cancel() - fmt.Println("subprocess exited, litestream shutting down") + fmt.Fprintln(m.stdout, "subprocess exited, litestream shutting down") case sig := <-signalCh: cancel() - fmt.Println("signal received, litestream shutting down") + fmt.Fprintln(m.stdout, "signal received, litestream shutting down") if c.cmd != nil { - fmt.Println("sending signal to exec process") + fmt.Fprintln(m.stdout, "sending signal to exec process") if err := c.cmd.Process.Signal(sig); err != nil { return fmt.Errorf("cannot signal exec process: %w", err) } - fmt.Println("waiting for exec process to close") + fmt.Fprintln(m.stdout, "waiting for exec process to close") if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") { return fmt.Errorf("cannot wait for exec process: %w", err) } @@ -118,17 +126,17 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { if e := c.Close(); e != nil && err == nil { err = e } - fmt.Println("litestream shut down") + fmt.Fprintln(m.stdout, "litestream shut down") return err case "restore": - return (&RestoreCommand{}).Run(ctx, args) + return NewRestoreCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args) case "snapshots": - return (&SnapshotsCommand{}).Run(ctx, args) + return NewSnapshotsCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args) case "version": - return (&VersionCommand{}).Run(ctx, args) + return NewVersionCommand(m.stdin, m.stdout, m.stderr).Run(ctx, args) case "wal": - return (&WALCommand{}).Run(ctx, args) + return NewWALCommand(m.stdin, 
m.stdout, m.stderr).Run(ctx, args) default: if cmd == "" || cmd == "help" || strings.HasPrefix(cmd, "-") { m.Usage() @@ -140,7 +148,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { // Usage prints the help screen to STDOUT. func (m *Main) Usage() { - fmt.Println(` + fmt.Fprintln(m.stdout, ` litestream is a tool for replicating SQLite databases. Usage: @@ -207,7 +215,34 @@ func (c *Config) DBConfig(path string) *DBConfig { // ReadConfigFile unmarshals config from filename. Expands path if needed. // If expandEnv is true then environment variables are expanded in the config. -func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { +// If filename is blank then the default config path is used. +func ReadConfigFile(filename string, expandEnv bool) (config Config, err error) { + var filenames []string + if filename != "" { + filenames = append(filenames, filename) + } + filenames = append(filenames, "./litestream.yml") + filenames = append(filenames, DefaultConfigPath()) + + for _, name := range filenames { + isDefaultPath := name != filename + + if config, err = readConfigFile(name, expandEnv); os.IsNotExist(err) { + if isDefaultPath { + continue + } + return config, fmt.Errorf("config file not found: %s", filename) + } else if err != nil { + return config, err + } + break + } + return config, nil +} + +// readConfigFile unmarshals config from filename. Expands path if needed. +// If expandEnv is true then environment variables are expanded in the config. +func readConfigFile(filename string, expandEnv bool) (_ Config, err error) { config := DefaultConfig() // Expand filename, if necessary. @@ -217,10 +252,9 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { } // Read configuration. + // Do not return an error if using default path and file is missing. 
buf, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - return config, fmt.Errorf("config file not found: %s", filename) - } else if err != nil { + if err != nil { return config, err } @@ -248,11 +282,12 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) { // DBConfig represents the configuration for a single database. type DBConfig struct { - Path string `yaml:"path"` - MonitorInterval *time.Duration `yaml:"monitor-interval"` - CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` - MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` - MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` + Path string `yaml:"path"` + MonitorDelayInterval *time.Duration `yaml:"monitor-delay-interval"` + CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` + MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` + MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"` + ShadowRetentionN *int `yaml:"shadow-retention-count"` Replicas []*ReplicaConfig `yaml:"replicas"` } @@ -263,13 +298,17 @@ func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) { if err != nil { return nil, err } + return NewDBFromConfigWithPath(dbc, path) +} +// NewDBFromConfigWithPath instantiates a DB based on a configuration and using a given path. +func NewDBFromConfigWithPath(dbc *DBConfig, path string) (*litestream.DB, error) { // Initialize database with given path. db := litestream.NewDB(path) // Override default database settings if specified in configuration. 
- if dbc.MonitorInterval != nil { - db.MonitorInterval = *dbc.MonitorInterval + if dbc.MonitorDelayInterval != nil { + db.MonitorDelayInterval = *dbc.MonitorDelayInterval } if dbc.CheckpointInterval != nil { db.CheckpointInterval = *dbc.CheckpointInterval @@ -280,6 +319,9 @@ func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) { if dbc.MaxCheckpointPageN != nil { db.MaxCheckpointPageN = *dbc.MaxCheckpointPageN } + if dbc.ShadowRetentionN != nil { + db.ShadowRetentionN = *dbc.ShadowRetentionN + } // Instantiate and attach replicas. for _, rc := range dbc.Replicas { @@ -332,55 +374,56 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", c.Path) } - // Build replica. - r := litestream.NewReplica(db, c.Name) - if v := c.Retention; v != nil { - r.Retention = *v - } - if v := c.RetentionCheckInterval; v != nil { - r.RetentionCheckInterval = *v - } - if v := c.SyncInterval; v != nil { - r.SyncInterval = *v - } - if v := c.SnapshotInterval; v != nil { - r.SnapshotInterval = *v - } - if v := c.ValidationInterval; v != nil { - r.ValidationInterval = *v - } - // Build and set client on replica. 
- switch c.ReplicaType() { + var client litestream.ReplicaClient + switch typ := c.ReplicaType(); typ { case "file": - if r.Client, err = newFileReplicaClientFromConfig(c, r); err != nil { + if client, err = newFileReplicaClientFromConfig(c); err != nil { return nil, err } case "s3": - if r.Client, err = newS3ReplicaClientFromConfig(c, r); err != nil { + if client, err = newS3ReplicaClientFromConfig(c); err != nil { return nil, err } - case "gcs": - if r.Client, err = newGCSReplicaClientFromConfig(c, r); err != nil { + case "gs": + if client, err = newGSReplicaClientFromConfig(c); err != nil { return nil, err } case "abs": - if r.Client, err = newABSReplicaClientFromConfig(c, r); err != nil { + if client, err = newABSReplicaClientFromConfig(c); err != nil { return nil, err } case "sftp": - if r.Client, err = newSFTPReplicaClientFromConfig(c, r); err != nil { + if client, err = newSFTPReplicaClientFromConfig(c); err != nil { return nil, err } default: - return nil, fmt.Errorf("unknown replica type in config: %q", c.Type) + return nil, fmt.Errorf("unknown replica type in config: %q", typ) + } + + // Build replica. + r := litestream.NewReplica(db, c.Name, client) + if v := c.Retention; v != nil { + r.Retention = *v + } + if v := c.RetentionCheckInterval; v != nil { + r.RetentionCheckInterval = *v + } + if v := c.SyncInterval; v != nil { + r.SyncInterval = *v + } + if v := c.SnapshotInterval; v != nil { + r.SnapshotInterval = *v + } + if v := c.ValidationInterval; v != nil { + r.ValidationInterval = *v } return r, nil } -// newFileReplicaClientFromConfig returns a new instance of file.ReplicaClient built from config. -func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *file.ReplicaClient, err error) { +// newFileReplicaClientFromConfig returns a new instance of FileReplicaClient built from config. 
+func newFileReplicaClientFromConfig(c *ReplicaConfig) (_ *litestream.FileReplicaClient, err error) { // Ensure URL & path are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for file replica") @@ -405,13 +448,11 @@ func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ } // Instantiate replica and apply time fields, if set. - client := file.NewReplicaClient(path) - client.Replica = r - return client, nil + return litestream.NewFileReplicaClient(path), nil } // newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config. -func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s3.ReplicaClient, err error) { +func newS3ReplicaClientFromConfig(c *ReplicaConfig) (_ *s3.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for s3 replica") @@ -473,13 +514,13 @@ func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s return client, nil } -// newGCSReplicaClientFromConfig returns a new instance of gcs.ReplicaClient built from config. -func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *gcs.ReplicaClient, err error) { +// newGSReplicaClientFromConfig returns a new instance of gs.ReplicaClient built from config. +func newGSReplicaClientFromConfig(c *ReplicaConfig) (_ *gs.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. 
if c.URL != "" && c.Path != "" { - return nil, fmt.Errorf("cannot specify url & path for gcs replica") + return nil, fmt.Errorf("cannot specify url & path for gs replica") } else if c.URL != "" && c.Bucket != "" { - return nil, fmt.Errorf("cannot specify url & bucket for gcs replica") + return nil, fmt.Errorf("cannot specify url & bucket for gs replica") } bucket, path := c.Bucket, c.Path @@ -502,18 +543,18 @@ func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ * // Ensure required settings are set. if bucket == "" { - return nil, fmt.Errorf("bucket required for gcs replica") + return nil, fmt.Errorf("bucket required for gs replica") } // Build replica. - client := gcs.NewReplicaClient() + client := gs.NewReplicaClient() client.Bucket = bucket client.Path = path return client, nil } // newABSReplicaClientFromConfig returns a new instance of abs.ReplicaClient built from config. -func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *abs.ReplicaClient, err error) { +func newABSReplicaClientFromConfig(c *ReplicaConfig) (_ *abs.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for abs replica") @@ -556,7 +597,7 @@ func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ * } // newSFTPReplicaClientFromConfig returns a new instance of sftp.ReplicaClient built from config. -func newSFTPReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *sftp.ReplicaClient, err error) { +func newSFTPReplicaClientFromConfig(c *ReplicaConfig) (_ *sftp.ReplicaClient, err error) { // Ensure URL & constituent parts are not both specified. 
if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for sftp replica") @@ -662,12 +703,12 @@ func DefaultConfigPath() string { if v := os.Getenv("LITESTREAM_CONFIG"); v != "" { return v } - return defaultConfigPath + return "/etc/litestream.yml" } -func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) { - return fs.String("config", "", "config path"), - fs.Bool("no-expand-env", false, "do not expand env vars in config") +func registerConfigFlag(fs *flag.FlagSet, configPath *string, noExpandEnv *bool) { + fs.StringVar(configPath, "config", "", "config path") + fs.BoolVar(noExpandEnv, "no-expand-env", false, "do not expand env vars in config") } // expand returns an absolute path for s. @@ -701,7 +742,7 @@ var _ flag.Value = (*indexVar)(nil) // String returns an 8-character hexadecimal value. func (v *indexVar) String() string { - return fmt.Sprintf("%08x", int(*v)) + return litestream.FormatIndex(int(*v)) } // Set parses s into an integer from a hexadecimal value. @@ -713,3 +754,45 @@ func (v *indexVar) Set(s string) error { *v = indexVar(i) return nil } + +// loadReplicas returns a list of replicas to use based on CLI flags. Filters +// by replicaName, if not blank. The DB is returned if pathOrURL is not a replica URL. +func loadReplicas(ctx context.Context, config Config, pathOrURL, replicaName string) ([]*litestream.Replica, *litestream.DB, error) { + // Build a replica based on URL, if specified. + if isURL(pathOrURL) { + r, err := NewReplicaFromConfig(&ReplicaConfig{ + URL: pathOrURL, + AccessKeyID: config.AccessKeyID, + SecretAccessKey: config.SecretAccessKey, + }, nil) + if err != nil { + return nil, nil, err + } + return []*litestream.Replica{r}, nil, nil + } + + // Otherwise use replicas from the database configuration file. 
+ path, err := expand(pathOrURL) + if err != nil { + return nil, nil, err + } + dbc := config.DBConfig(path) + if dbc == nil { + return nil, nil, fmt.Errorf("database not found in config: %s", path) + } + db, err := NewDBFromConfig(dbc) + if err != nil { + return nil, nil, err + } + + // Filter by replica, if specified. + if replicaName != "" { + r := db.Replica(replicaName) + if r == nil { + return nil, nil, fmt.Errorf("replica %q not found for database %q", replicaName, db.Path()) + } + return []*litestream.Replica{r}, db, nil + } + + return db.Replicas, db, nil +} diff --git a/cmd/litestream/main_notwindows.go b/cmd/litestream/main_notwindows.go deleted file mode 100644 index aaf87a10..00000000 --- a/cmd/litestream/main_notwindows.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package main - -import ( - "context" - "os" - "os/signal" - "syscall" -) - -const defaultConfigPath = "/etc/litestream.yml" - -func isWindowsService() (bool, error) { - return false, nil -} - -func runWindowsService(ctx context.Context) error { - panic("cannot run windows service as unix process") -} - -func signalChan() <-chan os.Signal { - ch := make(chan os.Signal, 2) - signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) - return ch -} diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index d99c52df..f37ff467 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -1,17 +1,24 @@ package main_test import ( + "bytes" + "io" "io/ioutil" + "log" "os" "path/filepath" "testing" + "github.com/benbjohnson/litestream" main "github.com/benbjohnson/litestream/cmd/litestream" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" "github.com/benbjohnson/litestream/s3" ) +func init() { + litestream.LogFlags = log.Lmsgprefix | log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC | log.Lshortfile +} + func TestReadConfigFile(t *testing.T) { // Ensure global AWS settings are 
propagated down to replica configurations. t.Run("PropagateGlobalSettings", func(t *testing.T) { @@ -97,7 +104,7 @@ func TestNewFileReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*file.ReplicaClient); !ok { + } else if client, ok := r.Client().(*litestream.FileReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Path(), "/foo"; got != want { t.Fatalf("Path=%s, want %s", got, want) @@ -109,7 +116,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -128,7 +135,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.localhost:9000/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -147,7 +154,7 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.s3.us-west-000.backblazeb2.com/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*s3.ReplicaClient); !ok { + } else if client, ok := r.Client().(*s3.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -163,11 +170,11 @@ func TestNewS3ReplicaFromConfig(t *testing.T) { }) } -func 
TestNewGCSReplicaFromConfig(t *testing.T) { - r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gcs://foo/bar"}, nil) +func TestNewGSReplicaFromConfig(t *testing.T) { + r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gs://foo/bar"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*gcs.ReplicaClient); !ok { + } else if client, ok := r.Client().(*gs.ReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Bucket, "foo"; got != want { t.Fatalf("Bucket=%s, want %s", got, want) @@ -175,3 +182,17 @@ func TestNewGCSReplicaFromConfig(t *testing.T) { t.Fatalf("Path=%s, want %s", got, want) } } + +// newMain returns a new instance of Main and associated buffers. +func newMain() (m *main.Main, stdin, stdout, stderr *bytes.Buffer) { + stdin, stdout, stderr = &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{} + + // Split stdout/stderr to terminal if verbose flag set. + out, err := io.Writer(stdout), io.Writer(stderr) + if testing.Verbose() { + out = io.MultiWriter(out, os.Stdout) + err = io.MultiWriter(err, os.Stderr) + } + + return main.NewMain(stdin, out, err), stdin, stdout, stderr +} diff --git a/cmd/litestream/main_windows.go b/cmd/litestream/main_windows.go deleted file mode 100644 index a762d322..00000000 --- a/cmd/litestream/main_windows.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build windows - -package main - -import ( - "context" - "io" - "log" - "os" - "os/signal" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/eventlog" -) - -const defaultConfigPath = `C:\Litestream\litestream.yml` - -// serviceName is the Windows Service name. -const serviceName = "Litestream" - -// isWindowsService returns true if currently executing within a Windows service. -func isWindowsService() (bool, error) { - return svc.IsWindowsService() -} - -func runWindowsService(ctx context.Context) error { - // Attempt to install new log service. 
This will fail if already installed. - // We don't log the error because we don't have anywhere to log until we open the log. - _ = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info) - - elog, err := eventlog.Open(serviceName) - if err != nil { - return err - } - defer elog.Close() - - // Set eventlog as log writer while running. - log.SetOutput((*eventlogWriter)(elog)) - defer log.SetOutput(os.Stderr) - - log.Print("Litestream service starting") - - if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil { - return errStop - } - - log.Print("Litestream service stopped") - return nil -} - -// windowsService is an interface adapter for svc.Handler. -type windowsService struct { - ctx context.Context -} - -func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusCh chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { - var err error - - // Notify Windows that the service is starting up. - statusCh <- svc.Status{State: svc.StartPending} - - // Instantiate replication command and load configuration. - c := NewReplicateCommand() - if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil { - log.Printf("cannot load configuration: %s", err) - return true, 1 - } - - // Execute replication command. - if err := c.Run(s.ctx); err != nil { - log.Printf("cannot replicate: %s", err) - statusCh <- svc.Status{State: svc.StopPending} - return true, 2 - } - - // Notify Windows that the service is now running. - statusCh <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop} - - for { - select { - case req := <-r: - switch req.Cmd { - case svc.Stop: - c.Close() - statusCh <- svc.Status{State: svc.StopPending} - return false, windows.NO_ERROR - case svc.Interrogate: - statusCh <- req.CurrentStatus - default: - log.Printf("Litestream service received unexpected change request cmd: %d", req.Cmd) - } - } - } -} - -// Ensure implementation implements io.Writer interface. 
-var _ io.Writer = (*eventlogWriter)(nil) - -// eventlogWriter is an adapter for using eventlog.Log as an io.Writer. -type eventlogWriter eventlog.Log - -func (w *eventlogWriter) Write(p []byte) (n int, err error) { - elog := (*eventlog.Log)(w) - return 0, elog.Info(1, string(p)) -} - -func signalChan() <-chan os.Signal { - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - return ch -} diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index 7c0403bf..9a8650b4 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -4,36 +4,45 @@ import ( "context" "flag" "fmt" + "io" "log" - "net" - "net/http" - _ "net/http/pprof" "os" "os/exec" "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/gs" + "github.com/benbjohnson/litestream/http" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" "github.com/mattn/go-shellwords" - "github.com/prometheus/client_golang/prometheus/promhttp" ) // ReplicateCommand represents a command that continuously replicates SQLite databases. type ReplicateCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + configPath string + noExpandEnv bool + cmd *exec.Cmd // subcommand execCh chan error // subcommand error channel Config Config - // List of managed databases specified in the config. - DBs []*litestream.DB + server *litestream.Server + httpServer *http.Server } -func NewReplicateCommand() *ReplicateCommand { +// NewReplicateCommand returns a new instance of ReplicateCommand. 
+func NewReplicateCommand(stdin io.Reader, stdout, stderr io.Writer) *ReplicateCommand { return &ReplicateCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + execCh: make(chan error), } } @@ -42,8 +51,8 @@ func NewReplicateCommand() *ReplicateCommand { func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) execFlag := fs.String("exec", "", "execute subcommand") - tracePath := fs.String("trace", "", "trace path") - configPath, noExpandEnv := registerConfigFlag(fs) + addr := fs.String("addr", "", "HTTP bind address (host:port)") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -53,7 +62,7 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e if fs.NArg() == 1 { return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0)) } else if fs.NArg() > 1 { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } @@ -67,29 +76,22 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e } c.Config.DBs = []*DBConfig{dbConfig} } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } - if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil { + if c.Config, err = ReadConfigFile(c.configPath, !c.noExpandEnv); err != nil { return err } } - // Override config exec command, if specified. + // Override config with flags, if specified. + if *addr != "" { + c.Config.Addr = *addr + } if *execFlag != "" { c.Config.Exec = *execFlag } - // Enable trace logging. 
- if *tracePath != "" { - f, err := os.Create(*tracePath) - if err != nil { - return err - } - defer f.Close() - litestream.Tracef = log.New(f, "", log.LstdFlags|log.Lmicroseconds|log.LUTC|log.Lshortfile).Printf - } - return nil } @@ -103,29 +105,35 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { log.Println("no databases specified in configuration") } + c.server = litestream.NewServer() + if err := c.server.Open(); err != nil { + return fmt.Errorf("open server: %w", err) + } + + // Add databases to the server. for _, dbConfig := range c.Config.DBs { - db, err := NewDBFromConfig(dbConfig) + path, err := expand(dbConfig.Path) if err != nil { return err } - // Open database & attach to program. - if err := db.Open(); err != nil { + if err := c.server.Watch(path, func(path string) (*litestream.DB, error) { + return NewDBFromConfigWithPath(dbConfig, path) + }); err != nil { return err } - c.DBs = append(c.DBs, db) } // Notify user that initialization is done. - for _, db := range c.DBs { + for _, db := range c.server.DBs() { log.Printf("initialized db: %s", db.Path()) for _, r := range db.Replicas { - switch client := r.Client.(type) { - case *file.ReplicaClient: + switch client := r.Client().(type) { + case *litestream.FileReplicaClient: log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path()) case *s3.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Region, client.Endpoint, r.SyncInterval) - case *gcs.ReplicaClient: + case *gs.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, r.SyncInterval) case *abs.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Endpoint, 
r.SyncInterval) @@ -137,22 +145,13 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { } } - // Serve metrics over HTTP if enabled. + // Serve HTTP if enabled. if c.Config.Addr != "" { - hostport := c.Config.Addr - if host, port, _ := net.SplitHostPort(c.Config.Addr); port == "" { - return fmt.Errorf("must specify port for bind address: %q", c.Config.Addr) - } else if host == "" { - hostport = net.JoinHostPort("localhost", port) + c.httpServer = http.NewServer(c.server, c.Config.Addr) + if err := c.httpServer.Open(); err != nil { + return fmt.Errorf("cannot start http server: %w", err) } - - log.Printf("serving metrics on http://%s/metrics", hostport) - go func() { - http.Handle("/metrics", promhttp.Handler()) - if err := http.ListenAndServe(c.Config.Addr, nil); err != nil { - log.Printf("cannot start metrics server: %s", err) - } - }() + log.Printf("http server running at %s", c.httpServer.URL()) } // Parse exec commands args & start subprocess. @@ -162,8 +161,14 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { return fmt.Errorf("cannot parse exec command: %w", err) } + // Pass first database path to child process. + env := os.Environ() + if dbs := c.server.DBs(); len(dbs) > 0 { + env = append(env, fmt.Sprintf("LITESTREAM_DB_PATH=%s", dbs[0].Path())) + } + c.cmd = exec.CommandContext(ctx, execArgs[0], execArgs[1:]...) - c.cmd.Env = os.Environ() + c.cmd.Env = env c.cmd.Stdout = os.Stdout c.cmd.Stderr = os.Stderr if err := c.cmd.Start(); err != nil { @@ -172,17 +177,21 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { go func() { c.execCh <- c.cmd.Wait() }() } + log.Printf("litestream initialization complete") + return nil } -// Close closes all open databases. +// Close closes the HTTP server & all open databases. 
func (c *ReplicateCommand) Close() (err error) { - for _, db := range c.DBs { - if e := db.SoftClose(); e != nil { - log.Printf("error closing db: path=%s err=%s", db.Path(), e) - if err == nil { - err = e - } + if c.httpServer != nil { + if e := c.httpServer.Close(); e != nil && err == nil { + err = e + } + } + if c.server != nil { + if e := c.server.Close(); e != nil && err == nil { + err = e } } return err @@ -190,7 +199,7 @@ func (c *ReplicateCommand) Close() (err error) { // Usage prints the help screen to STDOUT. func (c *ReplicateCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The replicate command starts a server to monitor & replicate databases. You can specify your database & replicas in a configuration file or you can replicate a single database file by specifying its path and its replicas in the @@ -212,11 +221,12 @@ Arguments: Executes a subcommand. Litestream will exit when the child process exits. Useful for simple process management. + -addr BIND_ADDR + Starts an HTTP server that reports prometheus metrics and provides + an endpoint for live read replication. (e.g. ":9090") + -no-expand-env Disables environment variable expansion in configuration file. - -trace PATH - Write verbose trace logging to PATH. 
- `[1:], DefaultConfigPath()) } diff --git a/cmd/litestream/replicate_test.go b/cmd/litestream/replicate_test.go new file mode 100644 index 00000000..7d85b04e --- /dev/null +++ b/cmd/litestream/replicate_test.go @@ -0,0 +1,136 @@ +package main_test + +import ( + "context" + "database/sql" + "errors" + "fmt" + "hash/crc64" + "io" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "golang.org/x/sync/errgroup" +) + +func TestReplicateCommand(t *testing.T) { + if testing.Short() { + t.Skip("long running test, skipping") + } else if runtime.GOOS != "linux" { + t.Skip("must run system tests on Linux, skipping") + } + + const writeTime = 10 * time.Second + + dir := t.TempDir() + configPath := filepath.Join(dir, "litestream.yml") + dbPath := filepath.Join(dir, "db") + restorePath := filepath.Join(dir, "restored") + replicaPath := filepath.Join(dir, "replica") + + if err := os.WriteFile(configPath, []byte(` +dbs: + - path: `+dbPath+` + replicas: + - path: `+replicaPath+` +`), 0666); err != nil { + t.Fatal(err) + } + + // Generate data into SQLite database from separate goroutine. 
+ g, ctx := errgroup.WithContext(context.Background()) + mainctx, cancel := context.WithCancel(ctx) + g.Go(func() error { + defer cancel() + + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + return err + } + defer db.Close() + + if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + return fmt.Errorf("cannot enable wal: %w", err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + return fmt.Errorf("cannot enable wal: %w", err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + return fmt.Errorf("cannot create table: %w", err) + } + + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + timer := time.NewTimer(writeTime) + defer timer.Stop() + + for i := 0; ; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + case <-ticker.C: + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?);`, i); err != nil { + return fmt.Errorf("cannot insert: i=%d err=%w", i, err) + } + } + } + }) + + // Replicate database unless the context is canceled. + g.Go(func() error { + m, _, _, _ := newMain() + return m.Run(mainctx, []string{"replicate", "-config", configPath}) + }) + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + + // Checkpoint database. + mustCheckpoint(t, dbPath) + chksum0 := mustChecksum(t, dbPath) + + // Restore to another path. + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) { + t.Fatal(err) + } + + // Verify contents match. 
+ if chksum1 := mustChecksum(t, restorePath); chksum0 != chksum1 { + t.Fatal("restore mismatch") + } +} + +func mustCheckpoint(tb testing.TB, path string) { + tb.Helper() + + db, err := sql.Open("sqlite3", path) + if err != nil { + tb.Fatal(err) + } + defer db.Close() + + if _, err := db.Exec(`PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + tb.Fatal(err) + } +} + +func mustChecksum(tb testing.TB, path string) uint64 { + tb.Helper() + + f, err := os.Open(path) + if err != nil { + tb.Fatal(err) + } + defer f.Close() + + h := crc64.New(crc64.MakeTable(crc64.ISO)) + if _, err := io.Copy(h, f); err != nil { + tb.Fatal(err) + } + return h.Sum64() +} diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 28c20fc1..e4324812 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -2,11 +2,12 @@ package main import ( "context" - "errors" "flag" "fmt" + "io" "log" "os" + "path/filepath" "strconv" "time" @@ -14,24 +15,50 @@ import ( ) // RestoreCommand represents a command to restore a database from a backup. -type RestoreCommand struct{} +type RestoreCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + snapshotIndex int // index of snapshot to start from + + // CLI options + configPath string // path to config file + noExpandEnv bool // if true, do not expand env variables in config + outputPath string // path to restore database to + replicaName string // optional, name of replica to restore from + generation string // optional, generation to restore + targetIndex int // optional, last WAL index to replay + timestamp time.Time // optional, restore to point-in-time (ISO 8601) + ifDBNotExists bool // if true, skips restore if output path already exists + ifReplicaExists bool // if true, skips if no backups exist + opt litestream.RestoreOptions +} + +// NewRestoreCommand returns a new instance of RestoreCommand. 
+func NewRestoreCommand(stdin io.Reader, stdout, stderr io.Writer) *RestoreCommand { + return &RestoreCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + + targetIndex: -1, + opt: litestream.NewRestoreOptions(), + } +} // Run executes the command. func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { - opt := litestream.NewRestoreOptions() - opt.Verbose = true - fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - fs.StringVar(&opt.OutputPath, "o", "", "output path") - fs.StringVar(&opt.ReplicaName, "replica", "", "replica name") - fs.StringVar(&opt.Generation, "generation", "", "generation name") - fs.Var((*indexVar)(&opt.Index), "index", "wal index") - fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism") - ifDBNotExists := fs.Bool("if-db-not-exists", false, "") - ifReplicaExists := fs.Bool("if-replica-exists", false, "") - timestampStr := fs.String("timestamp", "", "timestamp") - verbose := fs.Bool("v", false, "verbose output") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.outputPath, "o", "", "output path") + fs.StringVar(&c.replicaName, "replica", "", "replica name") + fs.StringVar(&c.generation, "generation", "", "generation name") + fs.Var((*indexVar)(&c.targetIndex), "index", "wal index") + timestampStr := fs.String("timestamp", "", "point-in-time restore (ISO 8601)") + fs.IntVar(&c.opt.Parallelism, "parallelism", c.opt.Parallelism, "parallelism") + fs.BoolVar(&c.ifDBNotExists, "if-db-not-exists", false, "") + fs.BoolVar(&c.ifReplicaExists, "if-replica-exists", false, "") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -40,87 +67,122 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } + pathOrURL := fs.Arg(0) - // Parse timestamp, if specified. + // Parse timestamp. 
if *timestampStr != "" { - if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil { - return errors.New("invalid -timestamp, must specify in ISO 8601 format (e.g. 2000-01-01T00:00:00Z)") + if c.timestamp, err = time.Parse(time.RFC3339Nano, *timestampStr); err != nil { + return fmt.Errorf("invalid -timestamp, expected ISO 8601: %w", err) } } - // Instantiate logger if verbose output is enabled. - if *verbose { - opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds) + // Ensure a generation is specified if target index is specified. + if c.targetIndex != -1 && !c.timestamp.IsZero() { + return fmt.Errorf("cannot specify both -index flag and -timestamp flag") + } else if c.targetIndex != -1 && c.generation == "" { + return fmt.Errorf("must specify -generation flag when using -index flag") + } else if !c.timestamp.IsZero() && c.generation == "" { + return fmt.Errorf("must specify -generation flag when using -timestamp flag") } - // Determine replica & generation to restore from. - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists { - fmt.Println("database already exists, skipping") + // Default to original database path if output path not specified. + if !isURL(pathOrURL) && c.outputPath == "" { + c.outputPath = pathOrURL + } + + // Exit successfully if the output file already exists and flag is set. 
+ if _, err := os.Stat(c.outputPath); os.IsNotExist(err) { + // file doesn't exist, continue + } else if err != nil { + return err + } else if err == nil { + if c.ifDBNotExists { + fmt.Fprintln(c.stdout, "database already exists, skipping") return nil - } else if err != nil { - return err - } - } else { - if *configPath == "" { - *configPath = DefaultConfigPath() } - if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists { - fmt.Println("database already exists, skipping") - return nil + return fmt.Errorf("output file already exists: %s", c.outputPath) + } + + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err + } + + // Build replica from either a URL or config. + r, err := c.loadReplica(ctx, config, pathOrURL) + if err != nil { + return err + } + + // Determine latest generation if one is not specified. + if c.generation == "" { + if c.generation, err = litestream.FindLatestGeneration(ctx, r.Client()); err == litestream.ErrNoGeneration { + // Return an error if no matching targets found. + // If optional flag set, return success. Useful for automated recovery. + if c.ifReplicaExists { + fmt.Fprintln(c.stdout, "no matching backups found, skipping") + return nil + } + return fmt.Errorf("no matching backups found") } else if err != nil { - return err + return fmt.Errorf("cannot determine latest generation: %w", err) } } - // Return an error if no matching targets found. - // If optional flag set, return success. Useful for automated recovery. - if opt.Generation == "" { - if *ifReplicaExists { - fmt.Println("no matching backups found") - return nil + // Determine the maximum available index for the generation if one is not specified. 
+ if !c.timestamp.IsZero() { + if c.targetIndex, err = litestream.FindIndexByTimestamp(ctx, r.Client(), c.generation, c.timestamp); err != nil { + return fmt.Errorf("cannot find index for timestamp in generation %q: %w", c.generation, err) } - return fmt.Errorf("no matching backups found") + } else if c.targetIndex == -1 { + if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client(), c.generation); err != nil { + return fmt.Errorf("cannot determine latest index in generation %q: %w", c.generation, err) + } + } + + // Find lastest snapshot that occurs before the index. + // TODO: Optionally allow -snapshot-index + if c.snapshotIndex, err = litestream.FindSnapshotForIndex(ctx, r.Client(), c.generation, c.targetIndex); err != nil { + return fmt.Errorf("cannot find snapshot index: %w", err) } - return r.Restore(ctx, opt) + // Create parent directory if it doesn't already exist. + if err := os.MkdirAll(filepath.Dir(c.outputPath), 0700); err != nil { + return fmt.Errorf("cannot create parent directory: %w", err) + } + + c.opt.Logger = log.New(c.stdout, "", log.LstdFlags|log.Lmicroseconds) + + return litestream.Restore(ctx, r.Client(), c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt) } -// loadFromURL creates a replica & updates the restore options from a replica URL. -func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) { - if opt.OutputPath == "" { - return nil, fmt.Errorf("output path required") +func (c *RestoreCommand) loadReplica(ctx context.Context, config Config, arg string) (*litestream.Replica, error) { + if isURL(arg) { + return c.loadReplicaFromURL(ctx, config, arg) } + return c.loadReplicaFromConfig(ctx, config, arg) +} - // Exit successfully if the output file already exists. 
- if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists { - return nil, errSkipDBExists +// loadReplicaFromURL creates a replica & updates the restore options from a replica URL. +func (c *RestoreCommand) loadReplicaFromURL(ctx context.Context, config Config, replicaURL string) (*litestream.Replica, error) { + if c.replicaName != "" { + return nil, fmt.Errorf("cannot specify both the replica URL and the -replica flag") + } else if c.outputPath == "" { + return nil, fmt.Errorf("output path required when using a replica URL") } syncInterval := litestream.DefaultSyncInterval - r, err := NewReplicaFromConfig(&ReplicaConfig{ - URL: replicaURL, - SyncInterval: &syncInterval, + return NewReplicaFromConfig(&ReplicaConfig{ + URL: replicaURL, + AccessKeyID: config.AccessKeyID, + SecretAccessKey: config.SecretAccessKey, + SyncInterval: &syncInterval, }, nil) - if err != nil { - return nil, err - } - opt.Generation, _, err = r.CalcRestoreTarget(ctx, *opt) - return r, err } -// loadFromConfig returns a replica & updates the restore options from a DB reference. -func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) { - // Load configuration. - config, err := ReadConfigFile(configPath, expandEnv) - if err != nil { - return nil, err - } - +// loadReplicaFromConfig returns replicas based on the specific config path. +func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, config Config, dbPath string) (_ *litestream.Replica, err error) { // Lookup database from configuration file by path. 
if dbPath, err = expand(dbPath); err != nil { return nil, err @@ -132,31 +194,40 @@ func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath db, err := NewDBFromConfig(dbConfig) if err != nil { return nil, err + } else if len(db.Replicas) == 0 { + return nil, fmt.Errorf("database has no replicas: %s", dbPath) } - // Restore into original database path if not specified. - if opt.OutputPath == "" { - opt.OutputPath = dbPath + // Filter by replica name if specified. + if c.replicaName != "" { + r := db.Replica(c.replicaName) + if r == nil { + return nil, fmt.Errorf("replica %q not found", c.replicaName) + } + return r, nil } - // Exit successfully if the output file already exists. - if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists { - return nil, errSkipDBExists + // Choose only replica if only one available and no name is specified. + if len(db.Replicas) == 1 { + return db.Replicas[0], nil } - // Determine the appropriate replica & generation to restore from, - r, generation, err := db.CalcRestoreTarget(ctx, *opt) - if err != nil { - return nil, err + // A replica must be specified when restoring a specific generation with multiple replicas. + if c.generation != "" { + return nil, fmt.Errorf("must specify -replica flag when restoring from a specific generation") } - opt.Generation = generation + // Determine latest replica to restore from. + r, err := litestream.LatestReplica(ctx, db.Replicas) + if err != nil { + return nil, fmt.Errorf("cannot determine latest replica: %w", err) + } return r, nil } // Usage prints the help screen to STDOUT. func (c *RestoreCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The restore command recovers a database from a previous snapshot and WAL. Usage: @@ -186,9 +257,9 @@ Arguments: Restore up to a specific hex-encoded WAL index (inclusive). Defaults to use the highest available index. - -timestamp TIMESTAMP - Restore to a specific point-in-time. 
- Defaults to use the latest available backup. + -timestamp DATETIME + Restore up to a specific point-in-time. Must be ISO 8601. + Cannot be specified with -index flag. -o PATH Output path of the restored database. @@ -204,18 +275,12 @@ Arguments: Determines the number of WAL files downloaded in parallel. Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`. - -v - Verbose output. - Examples: # Restore latest replica for database to original location. $ litestream restore /path/to/db - # Restore replica for database to a given point in time. - $ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db - # Restore latest replica for database to new /tmp directory $ litestream restore -o /tmp/db /path/to/db @@ -225,9 +290,10 @@ Examples: # Restore database from specific generation on S3. $ litestream restore -replica s3 -generation xxxxxxxx /path/to/db + # Restore database to a specific point in time. + $ litestream restore -generation xxxxxxxx -timestamp 2000-01-01T00:00:00Z /path/to/db + `[1:], DefaultConfigPath(), ) } - -var errSkipDBExists = errors.New("database already exists, skipping") diff --git a/cmd/litestream/restore_test.go b/cmd/litestream/restore_test.go new file mode 100644 index 00000000..6744963c --- /dev/null +++ b/cmd/litestream/restore_test.go @@ -0,0 +1,330 @@ +package main_test + +import ( + "context" + "flag" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestRestoreCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; 
got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + // STDOUT has timing info so we need to grep per line. + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000000/0000000000000000 to ` + filepath.Join(tempDir, "db.tmp"), + `applied wal 0000000000000000/0000000000000000 elapsed=`, + `applied wal 0000000000000000/0000000000000001 elapsed=`, + `applied wal 0000000000000000/0000000000000002 elapsed=`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + // STDOUT has timing info so we need to grep per line. 
+ lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000001/0000000000000001 to ` + filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "restore", "replica-url") + tempDir := t.TempDir() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(tempDir, "db"), replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000000/0000000000000000 to ` + filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("LatestReplica", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "latest-replica") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + if err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stderr.String(), ""; got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + + lines := strings.Split(stdout.String(), "\n") + for i, substr := range []string{ + `restoring snapshot 0000000000000001/0000000000000000 to ` + 
filepath.Join(tempDir, "db.tmp"), + `no wal files found, snapshot only`, + `renaming database from temporary location`, + } { + if !strings.Contains(lines[i], substr) { + t.Fatalf("stdout: unexpected line %d:\n%s", i+1, stdout) + } + } + }) + + t.Run("IfDBNotExistsFlag", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "if-db-not-exists-flag") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-db-not-exists", filepath.Join(testDir, "db")}) + if err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("IfReplicaExists", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "if-replica-exists-flag") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-if-replica-exists", filepath.Join(testDir, "db")}) + if err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrNoBackups", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-backups") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, stdout, stderr := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `no matching backups found` { + t.Fatalf("unexpected error: %s", err) + } else if got, want := stdout.String(), 
string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } else if got, want := stderr.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stderr"))); got != want { + t.Fatalf("stderr=%q, want %q", got, want) + } + }) + + t.Run("ErrNoGeneration", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-generation") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `no matching backups found` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrOutputPathExists", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "output-path-exists") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `output file already exists: `+filepath.Join(testDir, "db") { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but 
not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrIndexFlagOnly", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-index", "0", "/var/lib/db"}) + if err == nil || err.Error() != `must specify -generation flag when using -index flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrMkdir", func(t *testing.T) { + tempDir := t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "noperm"), 0000); err != nil { + t.Fatal(err) + } + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(tempDir, "noperm", "subdir", "db"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoOutputPathWithReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "file://path/to/replica"}) + if err == nil || err.Error() != `output path required when using a replica URL` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNameWithReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", 
"-replica", "replica0", "file://path/to/replica"}) + if err == nil || err.Error() != `cannot specify both the replica URL and the -replica flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-o", filepath.Join(t.TempDir(), "db"), "xyz://xyz"}) + if err == nil || err.Error() != `unknown replica type in config: "xyz"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoReplicas", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-replicas") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `database has no replicas: `+filepath.Join(testingutil.Getwd(t), testDir, "db") { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-replica", "no_such_replica", filepath.Join(testDir, 
"db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrGenerationWithNoReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "generation-with-no-replica") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-generation", "0000000000000000", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `must specify -replica flag when restoring from a specific generation` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrNoSnapshotsAvailable", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "no-snapshots") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + tempDir := t.TempDir() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"restore", "-config", filepath.Join(testDir, "litestream.yml"), "-o", filepath.Join(tempDir, "db"), "-generation", "0000000000000000", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `cannot determine latest index in generation "0000000000000000": no snapshots available` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"restore", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index 72e67a5a..c8309bae 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -4,8 +4,9 @@ import ( "context" "flag" "fmt" + "io" "log" - "os" + "sort" "text/tabwriter" "time" @@ -13,95 +14,90 @@ import ( ) // SnapshotsCommand represents a command to list snapshots for a command. 
-type SnapshotsCommand struct{} +type SnapshotsCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + configPath string + noExpandEnv bool + + replicaName string +} + +// NewSnapshotsCommand returns a new instance of SnapshotsCommand. +func NewSnapshotsCommand(stdin io.Reader, stdout, stderr io.Writer) *SnapshotsCommand { + return &SnapshotsCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. -func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { +func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - replicaName := fs.String("replica", "", "replica name") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.replicaName, "replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err } else if fs.NArg() == 0 || fs.Arg(0) == "" { - return fmt.Errorf("database path required") + return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if *configPath == "" { - *configPath = DefaultConfigPath() - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err + } - // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) - if err != nil { - return err - } + // Determine list of replicas to pull snapshots from. 
+ replicas, _, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err + } - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err + // Build list of snapshot metadata with associated replica. + var infos []replicaSnapshotInfo + for _, r := range replicas { + a, err := r.Snapshots(ctx) + if err != nil { + log.Printf("cannot determine snapshots: %s", err) + ret = errExit // signal error return without printing message + continue } - - // Filter by replica, if specified. - if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } + for i := range a { + infos = append(infos, replicaSnapshotInfo{SnapshotInfo: a[i], replicaName: r.Name()}) } } - // Find snapshots by db or replica. - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas - } + // Sort snapshots by creation time from newest to oldest. + sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) // List all snapshots. 
- w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) defer w.Flush() fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated") - for _, r := range replicas { - infos, err := r.Snapshots(ctx) - if err != nil { - log.Printf("cannot determine snapshots: %s", err) - continue - } - for _, info := range infos { - fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n", - r.Name(), - info.Generation, - info.Index, - info.Size, - info.CreatedAt.Format(time.RFC3339), - ) - } + for _, info := range infos { + fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n", + info.replicaName, + info.Generation, + litestream.FormatIndex(info.Index), + info.Size, + info.CreatedAt.Format(time.RFC3339), + ) } - return nil + return ret } // Usage prints the help screen to STDOUT. func (c *SnapshotsCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The snapshots command lists all snapshots available for a database or replica. Usage: @@ -137,3 +133,9 @@ Examples: DefaultConfigPath(), ) } + +// replicaSnapshotInfo represents snapshot metadata with associated replica name. 
+type replicaSnapshotInfo struct { + litestream.SnapshotInfo + replicaName string +} diff --git a/cmd/litestream/snapshots_test.go b/cmd/litestream/snapshots_test.go new file mode 100644 index 00000000..f845cdc6 --- /dev/null +++ b/cmd/litestream/snapshots_test.go @@ -0,0 +1,128 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestSnapshotsCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "snapshots", "replica-url") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", replicaURL}); err != nil { + 
t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := filepath.Join("testdata", "snapshots", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), 
[]string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "snapshots", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"snapshots", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"snapshots", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/cmd/litestream/testdata/Makefile b/cmd/litestream/testdata/Makefile new file mode 100644 index 00000000..14e20850 --- /dev/null +++ b/cmd/litestream/testdata/Makefile @@ -0,0 +1,13 @@ +.PHONY: default +default: + make -C generations/ok + make -C generations/no-database + make -C generations/replica-name + make -C generations/replica-url + make -C restore/latest-replica + make -C snapshots/ok + make -C snapshots/replica-name + make -C snapshots/replica-url + make -C wal/ok + make -C wal/replica-name + make -C wal/replica-url diff --git a/cmd/litestream/testdata/databases/invalid-config/litestream.yml b/cmd/litestream/testdata/databases/invalid-config/litestream.yml new 
file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/databases/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/databases/no-config/.gitignore b/cmd/litestream/testdata/databases/no-config/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/databases/no-databases/litestream.yml b/cmd/litestream/testdata/databases/no-databases/litestream.yml new file mode 100644 index 00000000..f6fff35c --- /dev/null +++ b/cmd/litestream/testdata/databases/no-databases/litestream.yml @@ -0,0 +1 @@ +dbs: diff --git a/cmd/litestream/testdata/databases/no-databases/stdout b/cmd/litestream/testdata/databases/no-databases/stdout new file mode 100644 index 00000000..9f9c245e --- /dev/null +++ b/cmd/litestream/testdata/databases/no-databases/stdout @@ -0,0 +1 @@ +No databases found in config file. diff --git a/cmd/litestream/testdata/databases/ok/litestream.yml b/cmd/litestream/testdata/databases/ok/litestream.yml new file mode 100644 index 00000000..14788e4d --- /dev/null +++ b/cmd/litestream/testdata/databases/ok/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: /var/lib/db + replicas: + - path: /var/lib/replica + - url: s3://mybkt/db + + - path: /my/other/db \ No newline at end of file diff --git a/cmd/litestream/testdata/databases/ok/stdout b/cmd/litestream/testdata/databases/ok/stdout new file mode 100644 index 00000000..58fcd650 --- /dev/null +++ b/cmd/litestream/testdata/databases/ok/stdout @@ -0,0 +1,3 @@ +path replicas +/var/lib/db file,s3 +/my/other/db diff --git a/cmd/litestream/testdata/generations/database-not-found/litestream.yml b/cmd/litestream/testdata/generations/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/generations/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git 
a/cmd/litestream/testdata/generations/invalid-config/litestream.yml b/cmd/litestream/testdata/generations/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/generations/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/generations/no-database/Makefile b/cmd/litestream/testdata/generations/no-database/Makefile new file mode 100644 index 00000000..61fea093 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/Makefile @@ -0,0 +1,4 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/no-database/litestream.yml b/cmd/litestream/testdata/generations/no-database/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 
00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/no-database/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/no-database/stdout b/cmd/litestream/testdata/generations/no-database/stdout new file mode 100644 index 00000000..774650c6 --- /dev/null +++ b/cmd/litestream/testdata/generations/no-database/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 - 2000-01-01T00:00:00Z 2000-01-01T00:00:00Z +file 0000000000000001 - 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/generations/ok/Makefile b/cmd/litestream/testdata/generations/ok/Makefile new file mode 100644 index 00000000..d18db159 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/Makefile @@ -0,0 +1,9 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001030000 db + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/generations/ok/db b/cmd/litestream/testdata/generations/ok/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/ok/litestream.yml b/cmd/litestream/testdata/generations/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - 
path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/ok/replica/db b/cmd/litestream/testdata/generations/ok/replica/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/ok/stdout b/cmd/litestream/testdata/generations/ok/stdout new file mode 100644 index 00000000..23d77954 --- /dev/null +++ b/cmd/litestream/testdata/generations/ok/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 0s 2000-01-01T00:00:00Z 2000-01-03T00:00:00Z +file 0000000000000001 48h0m0s 2000-01-01T00:00:00Z 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/generations/replica-name/Makefile b/cmd/litestream/testdata/generations/replica-name/Makefile new file mode 100644 index 00000000..aaab4b13 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001030000 db + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 
diff --git a/cmd/litestream/testdata/generations/replica-name/db b/cmd/litestream/testdata/generations/replica-name/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/litestream.yml b/cmd/litestream/testdata/generations/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/db b/cmd/litestream/testdata/generations/replica-name/replica0/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 
b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-name/replica1/db b/cmd/litestream/testdata/generations/replica-name/replica1/db new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 
differ diff --git a/cmd/litestream/testdata/generations/replica-name/stdout b/cmd/litestream/testdata/generations/replica-name/stdout new file mode 100644 index 00000000..111a6b23 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-name/stdout @@ -0,0 +1,2 @@ +name generation lag start end +replica1 0000000000000001 24h0m0s 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/generations/replica-not-found/litestream.yml b/cmd/litestream/testdata/generations/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/generations/replica-url/Makefile b/cmd/litestream/testdata/generations/replica-url/Makefile new file mode 100644 index 00000000..01edcf5d --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/Makefile @@ -0,0 +1,9 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + diff --git a/cmd/litestream/testdata/generations/replica-url/litestream.yml b/cmd/litestream/testdata/generations/replica-url/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/litestream.yml @@ 
-0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/generations/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/generations/replica-url/stdout b/cmd/litestream/testdata/generations/replica-url/stdout new file mode 100644 index 00000000..e099c745 --- /dev/null +++ b/cmd/litestream/testdata/generations/replica-url/stdout @@ -0,0 +1,3 @@ +name generation lag start end +file 0000000000000000 - 2000-01-01T00:00:00Z 2000-01-03T00:00:00Z +file 0000000000000001 - 2000-01-02T00:00:00Z 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/restore/database-not-found/litestream.yml b/cmd/litestream/testdata/restore/database-not-found/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/database-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml 
b/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml new file mode 100644 index 00000000..8696dbe0 --- /dev/null +++ b/cmd/litestream/testdata/restore/generation-with-no-replica/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica0 + - path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/restore/if-db-not-exists-flag/db b/cmd/litestream/testdata/restore/if-db-not-exists-flag/db new file mode 100644 index 00000000..cfd2b8d8 Binary files /dev/null and b/cmd/litestream/testdata/restore/if-db-not-exists-flag/db differ diff --git a/cmd/litestream/testdata/restore/if-db-not-exists-flag/litestream.yml b/cmd/litestream/testdata/restore/if-db-not-exists-flag/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/if-db-not-exists-flag/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/if-db-not-exists-flag/stdout b/cmd/litestream/testdata/restore/if-db-not-exists-flag/stdout new file mode 100644 index 00000000..2e1bdc70 --- /dev/null +++ b/cmd/litestream/testdata/restore/if-db-not-exists-flag/stdout @@ -0,0 +1 @@ +database already exists, skipping diff --git a/cmd/litestream/testdata/restore/if-replica-exists-flag/litestream.yml b/cmd/litestream/testdata/restore/if-replica-exists-flag/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/if-replica-exists-flag/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/if-replica-exists-flag/stdout b/cmd/litestream/testdata/restore/if-replica-exists-flag/stdout new file mode 100644 index 00000000..bb156b94 --- /dev/null +++ b/cmd/litestream/testdata/restore/if-replica-exists-flag/stdout 
@@ -0,0 +1 @@ +no matching backups found, skipping diff --git a/cmd/litestream/testdata/restore/invalid-config/litestream.yml b/cmd/litestream/testdata/restore/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/restore/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/restore/latest-replica/Makefile b/cmd/litestream/testdata/restore/latest-replica/Makefile new file mode 100644 index 00000000..24334a30 --- /dev/null +++ b/cmd/litestream/testdata/restore/latest-replica/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + diff --git a/cmd/litestream/testdata/restore/latest-replica/litestream.yml b/cmd/litestream/testdata/restore/latest-replica/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/restore/latest-replica/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git 
a/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/latest-replica/replica0/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/latest-replica/replica1/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/restore/no-backups/litestream.yml b/cmd/litestream/testdata/restore/no-backups/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-backups/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/no-backups/stderr b/cmd/litestream/testdata/restore/no-backups/stderr new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/restore/no-backups/stdout b/cmd/litestream/testdata/restore/no-backups/stdout new file mode 100644 index 00000000..e69de29b diff --git a/cmd/litestream/testdata/restore/no-generation/litestream.yml b/cmd/litestream/testdata/restore/no-generation/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git 
a/cmd/litestream/testdata/restore/no-replicas/litestream.yml b/cmd/litestream/testdata/restore/no-replicas/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/restore/no-replicas/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/restore/no-snapshots/litestream.yml b/cmd/litestream/testdata/restore/no-snapshots/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/no-snapshots/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/ok/0000000000000002.db b/cmd/litestream/testdata/restore/ok/0000000000000002.db new file mode 100644 index 00000000..cfd2b8d8 Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/0000000000000002.db differ diff --git a/cmd/litestream/testdata/restore/ok/README b/cmd/litestream/testdata/restore/ok/README new file mode 100644 index 00000000..48c0fd4e --- /dev/null +++ b/cmd/litestream/testdata/restore/ok/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 
0000000000000002.db + rm db* + diff --git a/cmd/litestream/testdata/restore/ok/litestream.yml b/cmd/litestream/testdata/restore/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/restore/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..37e1dcf9 Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..3bd7ab70 Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..c73bf2cb 
Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..64a4899b Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..2265d0e0 Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 new file mode 100644 index 00000000..c7dc94ff Binary files /dev/null and b/cmd/litestream/testdata/restore/ok/replica/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 differ diff --git a/cmd/litestream/testdata/restore/output-path-exists/db b/cmd/litestream/testdata/restore/output-path-exists/db new file mode 100644 index 00000000..cfd2b8d8 Binary files /dev/null and b/cmd/litestream/testdata/restore/output-path-exists/db differ diff --git a/cmd/litestream/testdata/restore/output-path-exists/litestream.yml b/cmd/litestream/testdata/restore/output-path-exists/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ 
b/cmd/litestream/testdata/restore/output-path-exists/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/restore/replica-name/litestream.yml b/cmd/litestream/testdata/restore/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/restore/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/restore/replica-not-found/litestream.yml b/cmd/litestream/testdata/restore/replica-not-found/litestream.yml new file mode 100644 index 00000000..b2a5e141 --- /dev/null +++ b/cmd/litestream/testdata/restore/replica-not-found/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica diff --git 
a/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/restore/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml b/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/snapshots/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml b/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/snapshots/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/snapshots/ok/Makefile b/cmd/litestream/testdata/snapshots/ok/Makefile new file mode 100644 index 00000000..739022ae --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + diff --git a/cmd/litestream/testdata/snapshots/ok/litestream.yml b/cmd/litestream/testdata/snapshots/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: 
$LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/ok/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/ok/stdout b/cmd/litestream/testdata/snapshots/ok/stdout new file mode 100644 index 00000000..604aefcc --- /dev/null +++ b/cmd/litestream/testdata/snapshots/ok/stdout @@ -0,0 +1,4 @@ +replica generation index size created +file 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-name/Makefile b/cmd/litestream/testdata/snapshots/replica-name/Makefile new file mode 100644 
index 00000000..c7399029 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/Makefile @@ -0,0 +1,4 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-name/litestream.yml b/cmd/litestream/testdata/snapshots/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/replica-name/replica0/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 
b/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/replica-name/replica1/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-name/stdout b/cmd/litestream/testdata/snapshots/replica-name/stdout new file mode 100644 index 00000000..276224a9 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-name/stdout @@ -0,0 +1,2 @@ +replica generation index size created +replica1 0000000000000001 0000000000000000 93 2000-01-02T00:00:00Z diff --git a/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml b/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/snapshots/replica-url/Makefile b/cmd/litestream/testdata/snapshots/replica-url/Makefile new file mode 100644 index 00000000..233e9223 --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-url/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary 
files /dev/null and b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/snapshots/replica-url/replica/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/cmd/litestream/testdata/snapshots/replica-url/stdout b/cmd/litestream/testdata/snapshots/replica-url/stdout new file mode 100644 index 00000000..604aefcc --- /dev/null +++ b/cmd/litestream/testdata/snapshots/replica-url/stdout @@ -0,0 +1,4 @@ +replica generation index size created +file 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/database-not-found/litestream.yml b/cmd/litestream/testdata/wal/database-not-found/litestream.yml new file mode 100644 index 00000000..266721eb --- /dev/null +++ b/cmd/litestream/testdata/wal/database-not-found/litestream.yml @@ -0,0 +1,2 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db diff --git a/cmd/litestream/testdata/wal/invalid-config/litestream.yml 
b/cmd/litestream/testdata/wal/invalid-config/litestream.yml new file mode 100644 index 00000000..26eb1ffe --- /dev/null +++ b/cmd/litestream/testdata/wal/invalid-config/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: /var/lib/db + replicas: + - path: s3://bkt/db diff --git a/cmd/litestream/testdata/wal/ok/Makefile b/cmd/litestream/testdata/wal/ok/Makefile new file mode 100644 index 00000000..6522d941 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 + diff --git a/cmd/litestream/testdata/wal/ok/litestream.yml b/cmd/litestream/testdata/wal/ok/litestream.yml new file mode 100644 index 00000000..544b74f8 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - path: $LITESTREAM_TESTDIR/replica diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 
index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/ok/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/ok/stdout b/cmd/litestream/testdata/wal/ok/stdout new file mode 100644 index 00000000..f420a7b8 --- /dev/null +++ b/cmd/litestream/testdata/wal/ok/stdout @@ -0,0 +1,5 @@ +replica generation index offset size created +file 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z +file 0000000000000000 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-name/Makefile b/cmd/litestream/testdata/wal/replica-name/Makefile new file mode 100644 index 00000000..2347a2f1 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-name/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 
replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 replica1/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica1/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 diff --git a/cmd/litestream/testdata/wal/replica-name/litestream.yml b/cmd/litestream/testdata/wal/replica-name/litestream.yml new file mode 100644 index 00000000..8511213a --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-name/litestream.yml @@ -0,0 +1,7 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - name: replica0 + path: $LITESTREAM_TESTDIR/replica0 + - name: replica1 + path: $LITESTREAM_TESTDIR/replica1 diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 
b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-name/replica0/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-name/replica1/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-name/stdout b/cmd/litestream/testdata/wal/replica-name/stdout new file mode 100644 index 00000000..80756dc8 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-name/stdout @@ -0,0 +1,2 @@ +replica generation index offset size created +replica1 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z diff --git a/cmd/litestream/testdata/wal/replica-not-found/litestream.yml b/cmd/litestream/testdata/wal/replica-not-found/litestream.yml new file mode 100644 index 00000000..5d911bd6 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-not-found/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TESTDIR/db + replicas: + - url: s3://bkt/db diff --git a/cmd/litestream/testdata/wal/replica-url/Makefile b/cmd/litestream/testdata/wal/replica-url/Makefile new file mode 100644 index 00000000..6522d941 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-url/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 
replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 + diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 
index 00000000..75363409 Binary files /dev/null and b/cmd/litestream/testdata/wal/replica-url/replica/generations/0000000000000001/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/cmd/litestream/testdata/wal/replica-url/stdout b/cmd/litestream/testdata/wal/replica-url/stdout new file mode 100644 index 00000000..f420a7b8 --- /dev/null +++ b/cmd/litestream/testdata/wal/replica-url/stdout @@ -0,0 +1,5 @@ +replica generation index offset size created +file 0000000000000001 0000000000000000 0000000000000000 93 2000-01-04T00:00:00Z +file 0000000000000000 0000000000000001 0000000000000000 93 2000-01-03T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000001 93 2000-01-02T00:00:00Z +file 0000000000000000 0000000000000000 0000000000000000 93 2000-01-01T00:00:00Z diff --git a/cmd/litestream/version.go b/cmd/litestream/version.go index 46698610..ccfae6d9 100644 --- a/cmd/litestream/version.go +++ b/cmd/litestream/version.go @@ -4,10 +4,24 @@ import ( "context" "flag" "fmt" + "io" ) // VersionCommand represents a command to print the current version. -type VersionCommand struct{} +type VersionCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer +} + +// NewVersionCommand returns a new instance of VersionCommand. +func NewVersionCommand(stdin io.Reader, stdout, stderr io.Writer) *VersionCommand { + return &VersionCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) { @@ -17,14 +31,14 @@ func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) { return err } - fmt.Println(Version) + fmt.Fprintln(c.stdout, Version) return nil } // Usage prints the help screen to STDOUT. func (c *VersionCommand) Usage() { - fmt.Println(` + fmt.Fprintln(c.stdout, ` Prints the version. 
Usage: diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index 9b7b9efc..fc54840c 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -4,8 +4,9 @@ import ( "context" "flag" "fmt" + "io" "log" - "os" + "sort" "text/tabwriter" "time" @@ -13,118 +14,116 @@ import ( ) // WALCommand represents a command to list WAL files for a database. -type WALCommand struct{} +type WALCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + + configPath string + noExpandEnv bool + + replicaName string + generation string +} + +// NewWALCommand returns a new instance of WALCommand. +func NewWALCommand(stdin io.Reader, stdout, stderr io.Writer) *WALCommand { + return &WALCommand{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + } +} // Run executes the command. -func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { +func (c *WALCommand) Run(ctx context.Context, args []string) (ret error) { fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - replicaName := fs.String("replica", "", "replica name") - generation := fs.String("generation", "", "generation name") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.replicaName, "replica", "", "replica name") + fs.StringVar(&c.generation, "generation", "", "generation name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err } else if fs.NArg() == 0 || fs.Arg(0) == "" { - return fmt.Errorf("database path required") + return fmt.Errorf("database path or replica URL required") } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } - var db *litestream.DB - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { - return err - } - } else { - if *configPath == "" { - *configPath = 
DefaultConfigPath() - } - - // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) - if err != nil { - return err - } - - // Lookup database from configuration file by path. - if path, err := expand(fs.Arg(0)); err != nil { - return err - } else if dbc := config.DBConfig(path); dbc == nil { - return fmt.Errorf("database not found in config: %s", path) - } else if db, err = NewDBFromConfig(dbc); err != nil { - return err - } - - // Filter by replica, if specified. - if *replicaName != "" { - if r = db.Replica(*replicaName); r == nil { - return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) - } - } + // Load configuration. + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) + if err != nil { + return err } - // Find WAL files by db or replica. - var replicas []*litestream.Replica - if r != nil { - replicas = []*litestream.Replica{r} - } else { - replicas = db.Replicas + // Build list of replicas from CLI flags. + replicas, _, err := loadReplicas(ctx, config, fs.Arg(0), c.replicaName) + if err != nil { + return err } - // List all WAL files. - w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0) - defer w.Flush() - - fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated") + // Build list of WAL metadata with associated replica. 
+ var infos []replicaWALSegmentInfo for _, r := range replicas { var generations []string - if *generation != "" { - generations = []string{*generation} + if c.generation != "" { + generations = []string{c.generation} } else { - if generations, err = r.Client.Generations(ctx); err != nil { + if generations, err = r.Client().Generations(ctx); err != nil { log.Printf("%s: cannot determine generations: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } } for _, generation := range generations { if err := func() error { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.Client().WALSegments(ctx, generation) if err != nil { return err } defer itr.Close() for itr.Next() { - info := itr.WALSegment() - - fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n", - r.Name(), - info.Generation, - info.Index, - info.Offset, - info.Size, - info.CreatedAt.Format(time.RFC3339), - ) + infos = append(infos, replicaWALSegmentInfo{ + WALSegmentInfo: itr.WALSegment(), + replicaName: r.Name(), + }) } return itr.Close() }(); err != nil { log.Printf("%s: cannot fetch wal segments: %s", r.Name(), err) + ret = errExit // signal error return without printing message continue } } } - return nil + // Sort WAL segments by creation time from newest to oldest. + sort.Slice(infos, func(i, j int) bool { return infos[i].CreatedAt.After(infos[j].CreatedAt) }) + + // List all WAL files. + w := tabwriter.NewWriter(c.stdout, 0, 8, 2, ' ', 0) + defer w.Flush() + + fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated") + for _, info := range infos { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s\n", + info.replicaName, + info.Generation, + litestream.FormatIndex(info.Index), + litestream.FormatOffset(info.Offset), + info.Size, + info.CreatedAt.Format(time.RFC3339), + ) + } + + return ret } // Usage prints the help screen to STDOUT. 
func (c *WALCommand) Usage() { - fmt.Printf(` + fmt.Fprintf(c.stdout, ` The wal command lists all wal segments available for a database. Usage: @@ -163,3 +162,9 @@ Examples: DefaultConfigPath(), ) } + +// replicaWALSegmentInfo represents WAL segment metadata with associated replica name. +type replicaWALSegmentInfo struct { + litestream.WALSegmentInfo + replicaName string +} diff --git a/cmd/litestream/wal_test.go b/cmd/litestream/wal_test.go new file mode 100644 index 00000000..6fbe0b02 --- /dev/null +++ b/cmd/litestream/wal_test.go @@ -0,0 +1,128 @@ +package main_test + +import ( + "context" + "flag" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream/internal/testingutil" +) + +func TestWALCommand(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "ok") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaName", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "replica-name") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "replica1", filepath.Join(testDir, "db")}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ReplicaURL", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "wal", "replica-url") + defer 
testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + replicaURL := "file://" + filepath.ToSlash(testDir) + "/replica" + + m, _, stdout, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", replicaURL}); err != nil { + t.Fatal(err) + } else if got, want := stdout.String(), string(testingutil.ReadFile(t, filepath.Join(testDir, "stdout"))); got != want { + t.Fatalf("stdout=%q, want %q", got, want) + } + }) + + t.Run("ErrDatabaseOrReplicaRequired", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal"}) + if err == nil || err.Error() != `database path or replica URL required` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrTooManyArguments", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "abc", "123"}) + if err == nil || err.Error() != `too many arguments` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidFlags", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-no-such-flag"}) + if err == nil || err.Error() != `flag provided but not defined: -no-such-flag` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrConfigFileNotFound", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", "/no/such/file", "/var/lib/db"}) + if err == nil || err.Error() != `config file not found: /no/such/file` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidConfig", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal", "invalid-config") + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "/var/lib/db"}) + if err == nil || !strings.Contains(err.Error(), `replica path cannot be a url`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDatabaseNotFound", func(t *testing.T) { + testDir := 
filepath.Join("testdata", "wal", "database-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "/no/such/db"}) + if err == nil || err.Error() != `database not found in config: /no/such/db` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrReplicaNotFound", func(t *testing.T) { + testDir := filepath.Join(testingutil.Getwd(t), "testdata", "wal", "replica-not-found") + defer testingutil.Setenv(t, "LITESTREAM_TESTDIR", testDir)() + + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "-config", filepath.Join(testDir, "litestream.yml"), "-replica", "no_such_replica", filepath.Join(testDir, "db")}) + if err == nil || err.Error() != `replica "no_such_replica" not found for database "`+filepath.Join(testDir, "db")+`"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrInvalidReplicaURL", func(t *testing.T) { + m, _, _, _ := newMain() + err := m.Run(context.Background(), []string{"wal", "xyz://xyz"}) + if err == nil || !strings.Contains(err.Error(), `unknown replica type in config: "xyz"`) { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("Usage", func(t *testing.T) { + m, _, _, _ := newMain() + if err := m.Run(context.Background(), []string{"wal", "-h"}); err != flag.ErrHelp { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/db.go b/db.go index dd33d7e5..baeeada5 100644 --- a/db.go +++ b/db.go @@ -8,29 +8,32 @@ import ( "encoding/hex" "errors" "fmt" - "hash/crc64" "io" "io/ioutil" "log" - "math" "math/rand" "os" "path/filepath" + "sort" "strings" "sync" "time" "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/sync/errgroup" ) // Default DB settings. 
const ( - DefaultMonitorInterval = 1 * time.Second - DefaultCheckpointInterval = 1 * time.Minute + DefaultMonitorDelayInterval = 10 * time.Millisecond + DefaultCheckpointInterval = 1 * time.Minute + DefaultMinCheckpointPageN = 1000 DefaultMaxCheckpointPageN = 10000 + DefaultShadowRetentionN = 32 ) // MaxIndex is the maximum possible WAL index. @@ -47,15 +50,24 @@ type DB struct { db *sql.DB // target database f *os.File // long-running db file descriptor rtx *sql.Tx // long running read transaction + pos Pos // cached position pageSize int // page size, in bytes - notify chan struct{} // closes on WAL change + notifyCh chan struct{} // notifies DB of changes + + // Cached salt & checksum from current shadow header. + hdr []byte + frame []byte + salt0, salt1 uint32 + chksum0, chksum1 uint32 + byteOrder binary.ByteOrder - fileInfo os.FileInfo // db info cached during init - dirInfo os.FileInfo // parent dir info cached during init + fileMode os.FileMode // db mode cached during init + dirMode os.FileMode // parent dir mode cached during init + uid, gid int // db user & group id cached during init ctx context.Context cancel func() - wg sync.WaitGroup + g errgroup.Group // Metrics dbSizeGauge prometheus.Gauge @@ -83,29 +95,40 @@ type DB struct { // unbounded if there are always read transactions occurring. MaxCheckpointPageN int + // Number of shadow WAL indexes to retain. This keeps files long enough for + // live replicas to retrieve the data but allows files to eventually be removed. + ShadowRetentionN int + + // Time after receiving change notification before reading next WAL segment. + // Used for batching changes into fewer files instead of every transaction + // creating its own file. + MonitorDelayInterval time.Duration + // Time between automatic checkpoints in the WAL. This is done to allow // more fine-grained WAL files so that restores can be performed with // better precision. CheckpointInterval time.Duration - // Frequency at which to perform db sync. 
- MonitorInterval time.Duration - // List of replicas for the database. // Must be set before calling Open(). Replicas []*Replica + + Logger *log.Logger } // NewDB returns a new instance of DB for a given path. func NewDB(path string) *DB { db := &DB{ - path: path, - notify: make(chan struct{}), + path: path, + notifyCh: make(chan struct{}, 1), + + MinCheckpointPageN: DefaultMinCheckpointPageN, + MaxCheckpointPageN: DefaultMaxCheckpointPageN, + ShadowRetentionN: DefaultShadowRetentionN, + MonitorDelayInterval: DefaultMonitorDelayInterval, + CheckpointInterval: DefaultCheckpointInterval, - MinCheckpointPageN: DefaultMinCheckpointPageN, - MaxCheckpointPageN: DefaultMaxCheckpointPageN, - CheckpointInterval: DefaultCheckpointInterval, - MonitorInterval: DefaultMonitorInterval, + Logger: log.New(LogWriter, fmt.Sprintf("%s: ", logPrefixPath(path)), LogFlags), } db.dbSizeGauge = dbSizeGaugeVec.WithLabelValues(db.path) @@ -140,10 +163,15 @@ func (db *DB) WALPath() string { return db.path + "-wal" } +// SHMPath returns the path to the database's shared memory file. +func (db *DB) SHMPath() string { + return db.path + "-shm" +} + // MetaPath returns the path to the database metadata. func (db *DB) MetaPath() string { dir, file := filepath.Split(db.path) - return filepath.Join(dir, "."+file+MetaDirSuffix) + return filepath.Join(dir, file+MetaDirSuffix) } // GenerationNamePath returns the path of the name of the current generation. @@ -158,99 +186,201 @@ func (db *DB) GenerationPath(generation string) string { return filepath.Join(db.MetaPath(), "generations", generation) } +// PositionPath returns the path of the file that stores the current position. +// This file is only used to communicate state to external processes. +func (db *DB) PositionPath() string { + return filepath.Join(db.MetaPath(), "position") +} + // ShadowWALDir returns the path of the shadow wal directory. // Panics if generation is blank. 
func (db *DB) ShadowWALDir(generation string) string { return filepath.Join(db.GenerationPath(generation), "wal") } -// ShadowWALPath returns the path of a single shadow WAL file. -// Panics if generation is blank or index is negative. -func (db *DB) ShadowWALPath(generation string, index int) string { - assert(index >= 0, "shadow wal index cannot be negative") - return filepath.Join(db.ShadowWALDir(generation), FormatWALPath(index)) +// Replica returns a replica by name. +func (db *DB) Replica(name string) *Replica { + for _, r := range db.Replicas { + if r.Name() == name { + return r + } + } + return nil } -// CurrentShadowWALPath returns the path to the last shadow WAL in a generation. -func (db *DB) CurrentShadowWALPath(generation string) (string, error) { - index, _, err := db.CurrentShadowWALIndex(generation) - if err != nil { - return "", err - } - return db.ShadowWALPath(generation, index), nil +// Pos returns the cached position of the database. +// Returns a zero position if no position has been calculated or if there is no generation. +func (db *DB) Pos() Pos { + db.mu.RLock() + defer db.mu.RUnlock() + return db.pos } -// CurrentShadowWALIndex returns the current WAL index & total size. -func (db *DB) CurrentShadowWALIndex(generation string) (index int, size int64, err error) { - fis, err := ioutil.ReadDir(filepath.Join(db.GenerationPath(generation), "wal")) - if os.IsNotExist(err) { - return 0, 0, nil // no wal files written for generation - } else if err != nil { - return 0, 0, err - } +// reset clears all cached data. +func (db *DB) reset() { + db.pos = Pos{} + db.hdr, db.frame = nil, nil + db.salt0, db.salt1 = 0, 0 + db.chksum0, db.chksum1 = 0, 0 + db.byteOrder = nil +} - // Find highest wal index. - for _, fi := range fis { - if v, err := ParseWALPath(fi.Name()); err != nil { - continue // invalid wal filename - } else if v > index { - index = v +// invalidate refreshes cached position, salt, & checksum from on-disk data. 
+func (db *DB) invalidate(ctx context.Context) (err error) { + // Clear cached data before starting. + db.reset() + + // If any error occurs, ensure all cached data is cleared. + defer func() { + if err != nil { + db.reset() } + }() - size += fi.Size() + // Determine the last position of the current generation. + if err := db.invalidatePos(ctx); err != nil { + return fmt.Errorf("cannot determine pos: %w", err) + } else if db.pos.IsZero() { + db.Logger.Printf("init: no wal files available, clearing generation") + if err := db.clearGeneration(ctx); err != nil { + return fmt.Errorf("clear generation: %w", err) + } + return nil // no position, exit } - return index, size, nil -} -// FileInfo returns the cached file stats for the database file when it was initialized. -func (db *DB) FileInfo() os.FileInfo { - return db.fileInfo + // Determine salt & last checksum. + if err := db.invalidateChecksum(ctx); err != nil { + return fmt.Errorf("cannot determine last salt/checksum: %w", err) + } + return nil } -// DirInfo returns the cached file stats for the parent directory of the database file when it was initialized. -func (db *DB) DirInfo() os.FileInfo { - return db.dirInfo +func (db *DB) invalidatePos(ctx context.Context) error { + // Determine generation based off "generation" file in meta directory. + generation, err := db.CurrentGeneration() + if err != nil { + return err + } else if generation == "" { + return nil + } + + // Iterate over all segments to find the last one. + itr, err := db.walSegments(context.Background(), generation) + if err != nil { + return err + } + defer itr.Close() + + var pos Pos + for itr.Next() { + info := itr.WALSegment() + pos = info.Pos() + } + if err := itr.Close(); err != nil { + return err + } + + // Exit if no WAL segments exist. + if pos.IsZero() { + return nil + } + + // Read size of last segment to determine ending position. 
+ rd, err := db.WALSegmentReader(ctx, pos) + if err != nil { + return fmt.Errorf("cannot read last wal segment: %w", err) + } + defer rd.Close() + + n, err := io.Copy(ioutil.Discard, lz4.NewReader(rd)) + if err != nil { + return err + } + pos.Offset += n + + // Save position to cache. + db.pos = pos + + return nil } -// Replica returns a replica by name. -func (db *DB) Replica(name string) *Replica { - for _, r := range db.Replicas { - if r.Name() == name { - return r - } +func (db *DB) invalidateChecksum(ctx context.Context) error { + assert(!db.pos.IsZero(), "position required to invalidate checksum") + + // Read entire WAL from combined segments. + rc, err := db.WALReader(ctx, db.pos.Generation, db.pos.Index) + if err != nil { + return fmt.Errorf("cannot read last wal: %w", err) + } + defer func() { _ = rc.Close() }() + + // Ensure we don't read past our position. + r := &io.LimitedReader{R: rc, N: db.pos.Offset} + + // Determine cache values from the current WAL file. + db.salt0, db.salt1, db.chksum0, db.chksum1, db.byteOrder, db.hdr, db.frame, err = ReadWALFields(r, db.pageSize) + if err != nil { + return fmt.Errorf("calc checksum: %w", err) } return nil } -// Pos returns the current position of the database. -func (db *DB) Pos() (Pos, error) { - generation, err := db.CurrentGeneration() +// WALReader returns the entire uncompressed WAL file for a given index. +func (db *DB) WALReader(ctx context.Context, generation string, index int) (_ io.ReadCloser, err error) { + // If any error occurs, we need to clean up all open handles. 
+ var rcs []io.ReadCloser + defer func() { + if err != nil { + for _, rc := range rcs { + rc.Close() + } + } + }() + + offsets, err := db.walSegmentOffsetsByIndex(generation, index) if err != nil { - return Pos{}, err - } else if generation == "" { - return Pos{}, nil + return nil, fmt.Errorf("wal segment offsets: %w", err) + } + + for _, offset := range offsets { + f, err := os.Open(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index), FormatOffset(offset)+".wal.lz4")) + if err != nil { + return nil, err + } + rcs = append(rcs, internal.NewReadCloser(lz4.NewReader(f), f)) } - index, _, err := db.CurrentShadowWALIndex(generation) + return internal.NewMultiReadCloser(rcs), nil +} + +func (db *DB) walSegmentOffsetsByIndex(generation string, index int) ([]int64, error) { + // Read files from index directory. + ents, err := os.ReadDir(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index))) if err != nil { - return Pos{}, err + return nil, err } - fi, err := os.Stat(db.ShadowWALPath(generation, index)) - if os.IsNotExist(err) { - return Pos{Generation: generation, Index: index}, nil - } else if err != nil { - return Pos{}, err + var offsets []int64 + for _, ent := range ents { + if !strings.HasSuffix(ent.Name(), ".wal.lz4") { + continue + } + offset, err := ParseOffset(strings.TrimSuffix(filepath.Base(ent.Name()), ".wal.lz4")) + if err != nil { + continue + } + offsets = append(offsets, offset) } - return Pos{Generation: generation, Index: index, Offset: frameAlign(fi.Size(), db.pageSize)}, nil + // Sort before returning. + sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] }) + + return offsets, nil } -// Notify returns a channel that closes when the shadow WAL changes. -func (db *DB) Notify() <-chan struct{} { - db.mu.RLock() - defer db.mu.RUnlock() - return db.notify +// NotifyCh returns a channel that can be used to signal changes in the DB. 
+func (db *DB) NotifyCh() chan<- struct{} { + return db.notifyCh } // PageSize returns the page size of the underlying database. @@ -282,31 +412,19 @@ func (db *DB) Open() (err error) { return fmt.Errorf("cannot remove tmp files: %w", err) } - // Start monitoring SQLite database in a separate goroutine. - if db.MonitorInterval > 0 { - db.wg.Add(1) - go func() { defer db.wg.Done(); db.monitor() }() - } + // Continually monitor local changes in a separate goroutine. + db.g.Go(func() error { return db.monitor(db.ctx) }) return nil } -// Close releases the read lock & closes the database. This method should only -// be called by tests as it causes the underlying database to be checkpointed. +// Close flushes outstanding WAL writes to replicas, releases the read lock, +// and closes the database. func (db *DB) Close() (err error) { - return db.close(false) -} - -// SoftClose closes everything but the underlying db connection. This method -// is available because the binary needs to avoid closing the database on exit -// to prevent autocheckpointing. -func (db *DB) SoftClose() (err error) { - return db.close(true) -} - -func (db *DB) close(soft bool) (err error) { db.cancel() - db.wg.Wait() + if e := db.g.Wait(); e != nil && err == nil { + err = e + } // Start a new context for shutdown since we canceled the DB context. ctx := context.Background() @@ -318,14 +436,22 @@ func (db *DB) close(soft bool) (err error) { } } - // Ensure replicas perform a final sync and stop replicating. + // Ensure replicas stop replicating and perform a final sync. for _, r := range db.Replicas { + // Stop normal background sync. + r.Stop() + + // Force one final sync if DB is open. if db.db != nil { if e := r.Sync(ctx); e != nil && err == nil { err = e } } - r.Stop(!soft) + + // Close out replica. + if e := r.Close(); e != nil && err == nil { + err = e + } } // Release the read lock to allow other applications to handle checkpointing. 
@@ -335,9 +461,7 @@ func (db *DB) close(soft bool) (err error) { } } - // Only perform full close if this is not a soft close. - // This closes the underlying database connection which can clean up the WAL. - if !soft && db.db != nil { + if db.db != nil { if e := db.db.Close(); e != nil && err == nil { err = e } @@ -366,8 +490,8 @@ func (db *DB) UpdatedAt() (time.Time, error) { return t, nil } -// init initializes the connection to the database. -// Skipped if already initialized or if the database file does not exist. +// init initializes the connection to the database. Skipped if already +// initialized or if the database file does not exist. func (db *DB) init() (err error) { // Exit if already initialized. if db.db != nil { @@ -375,44 +499,28 @@ func (db *DB) init() (err error) { } // Exit if no database file exists. - fi, err := os.Stat(db.path) - if os.IsNotExist(err) { + if _, err := os.Stat(db.path); os.IsNotExist(err) { return nil } else if err != nil { return err } - db.fileInfo = fi // Obtain permissions for parent directory. - if fi, err = os.Stat(filepath.Dir(db.path)); err != nil { + fi, err := os.Stat(filepath.Dir(db.path)) + if err != nil { return err } - db.dirInfo = fi + db.dirMode = fi.Mode() dsn := db.path dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) - // Connect to SQLite database. - if db.db, err = sql.Open("sqlite3", dsn); err != nil { + // Connect to SQLite database. Use the driver registered with a hook to + // prevent WAL files from being removed. + if db.db, err = sql.Open("litestream-sqlite3", dsn); err != nil { return err } - // Open long-running database file descriptor. Required for non-OFD locks. - if db.f, err = os.Open(db.path); err != nil { - return fmt.Errorf("open db file descriptor: %w", err) - } - - // Ensure database is closed if init fails. - // Initialization can retry on next sync. 
- defer func() { - if err != nil { - _ = db.releaseReadLock() - db.db.Close() - db.f.Close() - db.db, db.f = nil, nil - } - }() - // Enable WAL and ensure it is set. New mode should be returned on success: // https://www.sqlite.org/pragma.html#pragma_journal_mode var mode string @@ -429,16 +537,50 @@ func (db *DB) init() (err error) { // Create a table to force writes to the WAL when empty. // There should only ever be one row with id=1. - if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil { + if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil { return fmt.Errorf("create _litestream_seq table: %w", err) } // Create a lock table to force write locks during sync. // The sync write transaction always rolls back so no data should be in this table. - if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil { + if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil { return fmt.Errorf("create _litestream_lock table: %w", err) } + // Open long-running database file descriptor. Required for non-OFD locks. + if db.f, err = os.Open(db.path); err != nil { + return fmt.Errorf("open db file descriptor: %w", err) + } + + // Ensure database is closed if init fails. + // Initialization can retry on next sync. + defer func() { + if err != nil { + _ = db.releaseReadLock() + db.db.Close() + db.f.Close() + db.db, db.f = nil, nil + } + }() + + // Obtain file info once we know the database exists. + fi, err = os.Stat(db.path) + if err != nil { + return fmt.Errorf("init file stat: %w", err) + } + db.fileMode = fi.Mode() + db.uid, db.gid = internal.Fileinfo(fi) + + // Pass permissions to file replicas, if they exist. 
+ for _, r := range db.Replicas { + if client, ok := r.Client().(*FileReplicaClient); ok { + client.FileMode = db.fileMode + client.DirMode = db.dirMode + client.Uid = db.uid + client.Gid = db.gid + } + } + // Start a long-running read transaction to prevent other transactions // from checkpointing. if err := db.acquireReadLock(); err != nil { @@ -446,27 +588,32 @@ func (db *DB) init() (err error) { } // Read page size. - if err := db.db.QueryRow(`PRAGMA page_size;`).Scan(&db.pageSize); err != nil { + if err := db.db.QueryRowContext(db.ctx, `PRAGMA page_size;`).Scan(&db.pageSize); err != nil { return fmt.Errorf("read page size: %w", err) } else if db.pageSize <= 0 { return fmt.Errorf("invalid db page size: %d", db.pageSize) } // Ensure meta directory structure exists. - if err := internal.MkdirAll(db.MetaPath(), db.dirInfo); err != nil { + if err := internal.MkdirAll(db.MetaPath(), db.dirMode, db.uid, db.gid); err != nil { return err } + // Determine current position, if available. + if err := db.invalidate(db.ctx); err != nil { + return fmt.Errorf("invalidate: %w", err) + } + // If we have an existing shadow WAL, ensure the headers match. if err := db.verifyHeadersMatch(); err != nil { - log.Printf("%s: init: cannot determine last wal position, clearing generation; %s", db.path, err) - if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove generation name: %w", err) + db.Logger.Printf("init: cannot determine last wal position, clearing generation; %s", err) + if err := db.clearGeneration(db.ctx); err != nil { + return fmt.Errorf("clear generation: %w", err) } } // Clean up previous generations. - if err := db.clean(); err != nil { + if err := db.clean(db.ctx); err != nil { return fmt.Errorf("clean: %w", err) } @@ -478,52 +625,46 @@ func (db *DB) init() (err error) { return nil } -// verifyHeadersMatch returns true if the primary WAL and last shadow WAL header match. 
-func (db *DB) verifyHeadersMatch() error { - // Determine current generation. - generation, err := db.CurrentGeneration() - if err != nil { +func (db *DB) clearGeneration(ctx context.Context) error { + if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { return err - } else if generation == "" { - return nil } + return nil +} - // Find current generation & latest shadow WAL. - shadowWALPath, err := db.CurrentShadowWALPath(generation) - if err != nil { - return fmt.Errorf("cannot determine current shadow wal path: %w", err) +// verifyHeadersMatch returns true if the primary WAL and last shadow WAL header match. +func (db *DB) verifyHeadersMatch() error { + // Skip verification if we have no current position. + if db.pos.IsZero() { + return nil } - hdr0, err := readWALHeader(db.WALPath()) + // Read header from the real WAL file. + hdr, err := readWALHeader(db.WALPath()) if os.IsNotExist(err) { return fmt.Errorf("no primary wal: %w", err) } else if err != nil { return fmt.Errorf("primary wal header: %w", err) } - hdr1, err := readWALHeader(shadowWALPath) - if os.IsNotExist(err) { - return fmt.Errorf("no shadow wal") - } else if err != nil { - return fmt.Errorf("shadow wal header: %w", err) - } - - if !bytes.Equal(hdr0, hdr1) { - return fmt.Errorf("wal header mismatch %x <> %x on %s", hdr0, hdr1, shadowWALPath) + // Compare real WAL header with shadow WAL header. + // If there is a mismatch then the real WAL has been restarted outside Litestream. + if !bytes.Equal(hdr, db.hdr) { + return fmt.Errorf("wal header mismatch at %s", db.pos.Truncate()) } return nil } // clean removes old generations & WAL files. -func (db *DB) clean() error { - if err := db.cleanGenerations(); err != nil { +func (db *DB) clean(ctx context.Context) error { + if err := db.cleanGenerations(ctx); err != nil { return err } - return db.cleanWAL() + return db.cleanWAL(ctx) } // cleanGenerations removes old generations. 
-func (db *DB) cleanGenerations() error { +func (db *DB) cleanGenerations(ctx context.Context) error { generation, err := db.CurrentGeneration() if err != nil { return err @@ -551,46 +692,57 @@ } // cleanWAL removes WAL files that have been replicated. -func (db *DB) cleanWAL() error { +func (db *DB) cleanWAL(ctx context.Context) error { generation, err := db.CurrentGeneration() if err != nil { - return err + return fmt.Errorf("current generation: %w", err) + } else if generation == "" { + return nil } // Determine lowest index that's been replicated to all replicas. - min := -1 + minReplicaIndex := -1 for _, r := range db.Replicas { - pos := r.Pos() + pos := r.Pos().Truncate() if pos.Generation != generation { - pos = Pos{} // different generation, reset index to zero - } - if min == -1 || pos.Index < min { - min = pos.Index + continue // different generation, skip + } else if minReplicaIndex == -1 || pos.Index < minReplicaIndex { + minReplicaIndex = pos.Index } } - // Skip if our lowest index is too small. - if min <= 0 { + // Retain a certain number of WAL indexes behind the current position. + minRetentionIndex := db.pos.Index - db.ShadowRetentionN + + // Skip if we have replicas but none have replicated this generation yet. + if len(db.Replicas) > 0 && minReplicaIndex <= 0 { return nil } - min-- // Keep an extra WAL file. - // Remove all WAL files for the generation before the lowest index. + // Delete all WAL index directories below the minimum position. 
dir := db.ShadowWALDir(generation) - fis, err := ioutil.ReadDir(dir) - if os.IsNotExist(err) { - return nil - } else if err != nil { + ents, err := os.ReadDir(dir) + if err != nil { return err } - for _, fi := range fis { - if idx, err := ParseWALPath(fi.Name()); err != nil || idx >= min { + + for _, ent := range ents { + index, err := ParseIndex(ent.Name()) + if err != nil { continue + } else if len(db.Replicas) > 0 && index >= minReplicaIndex { + continue // not replicated yet, skip + } else if index >= minRetentionIndex { + continue // retain certain number of indexes, skip } - if err := os.Remove(filepath.Join(dir, fi.Name())); err != nil { + + if err := os.RemoveAll(filepath.Join(dir, FormatIndex(index))); err != nil { return err } + + db.Logger.Printf("remove shadow index: %s/%s", generation, FormatIndex(index)) } + return nil } @@ -607,7 +759,7 @@ func (db *DB) acquireReadLock() error { } // Execute read query to obtain read lock. - if _, err := tx.ExecContext(db.ctx, `SELECT COUNT(1) FROM _litestream_seq;`); err != nil { + if _, err := tx.Exec(`SELECT COUNT(1) FROM _litestream_seq;`); err != nil { _ = tx.Rollback() return err } @@ -633,15 +785,13 @@ func (db *DB) releaseReadLock() error { // CurrentGeneration returns the name of the generation saved to the "generation" // file in the meta data directory. Returns empty string if none exists. func (db *DB) CurrentGeneration() (string, error) { - buf, err := ioutil.ReadFile(db.GenerationNamePath()) + buf, err := os.ReadFile(db.GenerationNamePath()) if os.IsNotExist(err) { return "", nil } else if err != nil { return "", err } - // TODO: Verify if generation directory exists. If not, delete name file. 
- generation := strings.TrimSpace(string(buf)) if len(generation) != GenerationNameLen { return "", nil @@ -652,7 +802,7 @@ func (db *DB) CurrentGeneration() (string, error) { // createGeneration starts a new generation by creating the generation // directory, snapshotting to each replica, and updating the current // generation name. -func (db *DB) createGeneration() (string, error) { +func (db *DB) createGeneration(ctx context.Context) (string, error) { // Generate random generation hex name. buf := make([]byte, GenerationNameLen/2) _, _ = rand.New(rand.NewSource(time.Now().UnixNano())).Read(buf) @@ -660,32 +810,27 @@ func (db *DB) createGeneration() (string, error) { // Generate new directory. dir := filepath.Join(db.MetaPath(), "generations", generation) - if err := internal.MkdirAll(dir, db.dirInfo); err != nil { + if err := internal.MkdirAll(dir, db.dirMode, db.uid, db.gid); err != nil { return "", err } // Initialize shadow WAL with copy of header. - if _, err := db.initShadowWALFile(db.ShadowWALPath(generation, 0)); err != nil { + if err := db.initShadowWALIndex(ctx, Pos{Generation: generation}); err != nil { return "", fmt.Errorf("initialize shadow wal: %w", err) } // Atomically write generation name as current generation. generationNamePath := db.GenerationNamePath() - mode := os.FileMode(0600) - if db.fileInfo != nil { - mode = db.fileInfo.Mode() - } - if err := ioutil.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil { + if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), db.fileMode); err != nil { return "", fmt.Errorf("write generation temp file: %w", err) } - uid, gid := internal.Fileinfo(db.fileInfo) - _ = os.Chown(generationNamePath+".tmp", uid, gid) + _ = os.Chown(generationNamePath+".tmp", db.uid, db.gid) if err := os.Rename(generationNamePath+".tmp", generationNamePath); err != nil { return "", fmt.Errorf("rename generation file: %w", err) } // Remove old generations. 
- if err := db.clean(); err != nil { + if err := db.clean(db.ctx); err != nil { return "", err } @@ -693,18 +838,46 @@ func (db *DB) createGeneration() (string, error) { } // Sync copies pending data from the WAL to the shadow WAL. -func (db *DB) Sync(ctx context.Context) (err error) { - db.mu.Lock() - defer db.mu.Unlock() +func (db *DB) Sync(ctx context.Context) error { + const retryN = 5 + + for i := 0; i < retryN; i++ { + if err := func() error { + db.mu.Lock() + defer db.mu.Unlock() + return db.sync(ctx) + }(); err != nil { + db.Logger.Printf("sync error, retrying: %s", err) + } else { + break + } + } + return nil + +} +func (db *DB) sync(ctx context.Context) (err error) { // Initialize database, if necessary. Exit if no DB exists. if err := db.init(); err != nil { return err } else if db.db == nil { - Tracef("%s: sync: no database found", db.path) return nil } + // Ensure the cached position exists. + if db.pos.IsZero() { + if err := db.invalidate(ctx); err != nil { + return fmt.Errorf("invalidate: %w", err) + } + } + + // If sync fails, reset position & cache. + defer func() { + if err != nil { + db.reset() + } + }() + // Track total sync metrics. t := time.Now() defer func() { @@ -727,72 +900,75 @@ func (db *DB) Sync(ctx context.Context) (err error) { if err != nil { return fmt.Errorf("cannot verify wal state: %w", err) } - Tracef("%s: sync: info=%#v", db.path, info) - - // Track if anything in the shadow WAL changes and then notify at the end. - changed := info.walSize != info.shadowWALSize || info.restart || info.reason != "" // If we are unable to verify the WAL state then we start a new generation. if info.reason != "" { // Start new generation & notify user via log message. 
- if info.generation, err = db.createGeneration(); err != nil { + if info.generation, err = db.createGeneration(ctx); err != nil { return fmt.Errorf("create generation: %w", err) } - log.Printf("%s: sync: new generation %q, %s", db.path, info.generation, info.reason) + db.Logger.Printf("sync: new generation %q, %s", info.generation, info.reason) // Clear shadow wal info. - info.shadowWALPath = db.ShadowWALPath(info.generation, 0) - info.shadowWALSize = WALHeaderSize info.restart = false info.reason = "" - } // Synchronize real WAL with current shadow WAL. - newWALSize, err := db.syncWAL(info) - if err != nil { - return fmt.Errorf("sync wal: %w", err) + if err := db.copyToShadowWAL(ctx); err != nil { + return fmt.Errorf("cannot copy to shadow wal: %w", err) + } + + // If we are at the end of the WAL file, start a new index. + if info.restart { + // Move to beginning of next index. + pos := db.pos.Truncate() + pos.Index++ + + // Attempt to restart WAL from beginning of new index. + // Position is only committed to cache if successful. + if err := db.initShadowWALIndex(ctx, pos); err != nil { + return fmt.Errorf("cannot init shadow wal: pos=%s err=%w", pos, err) + } } // If WAL size is great than max threshold, force checkpoint. // If WAL size is greater than min threshold, attempt checkpoint. 
var checkpoint bool checkpointMode := CheckpointModePassive - if db.MaxCheckpointPageN > 0 && newWALSize >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) { + if db.MaxCheckpointPageN > 0 && db.pos.Offset >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) { checkpoint, checkpointMode = true, CheckpointModeRestart - } else if newWALSize >= calcWALSize(db.pageSize, db.MinCheckpointPageN) { + } else if db.pos.Offset >= calcWALSize(db.pageSize, db.MinCheckpointPageN) { checkpoint = true - } else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && newWALSize > calcWALSize(db.pageSize, 1) { + } else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && db.pos.Offset > calcWALSize(db.pageSize, 1) { checkpoint = true } // Issue the checkpoint. if checkpoint { - changed = true + // Under rare circumstances, a checkpoint can be unable to verify continuity + // and will require a restart. + if err := db.checkpoint(ctx, info.generation, checkpointMode); errors.Is(err, errRestartGeneration) { + generation, err := db.createGeneration(ctx) + if err != nil { + return fmt.Errorf("create generation: %w", err) + } + db.Logger.Printf("sync: new generation %q, possible WAL overrun occurred", generation) - if err := db.checkpoint(ctx, info.generation, checkpointMode); err != nil { + } else if err != nil { return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err) } } // Clean up any old files. - if err := db.clean(); err != nil { + if err := db.clean(ctx); err != nil { return fmt.Errorf("cannot clean: %w", err) } // Compute current index and total shadow WAL size. // This is only for metrics so we ignore any errors that occur. - index, size, _ := db.CurrentShadowWALIndex(info.generation) - db.shadowWALIndexGauge.Set(float64(index)) - db.shadowWALSizeGauge.Set(float64(size)) - - // Notify replicas of WAL changes. 
- if changed { - close(db.notify) - db.notify = make(chan struct{}) - } - - Tracef("%s: sync: ok", db.path) + db.shadowWALIndexGauge.Set(float64(db.pos.Index)) + db.shadowWALSizeGauge.Set(float64(db.pos.Offset)) return nil } @@ -836,67 +1012,36 @@ func (db *DB) verify() (info syncInfo, err error) { if err != nil { return info, err } - info.walSize = frameAlign(fi.Size(), db.pageSize) + walSize := fi.Size() info.walModTime = fi.ModTime() - db.walSizeGauge.Set(float64(fi.Size())) + db.walSizeGauge.Set(float64(walSize)) - // Open shadow WAL to copy append to. - index, _, err := db.CurrentShadowWALIndex(info.generation) - if err != nil { - return info, fmt.Errorf("cannot determine shadow WAL index: %w", err) - } else if index >= MaxIndex { + // Verify the index is not out of bounds. + if db.pos.Index >= MaxIndex { info.reason = "max index exceeded" return info, nil } - info.shadowWALPath = db.ShadowWALPath(generation, index) - // Determine shadow WAL current size. - fi, err = os.Stat(info.shadowWALPath) - if os.IsNotExist(err) { - info.reason = "no shadow wal" - return info, nil - } else if err != nil { - return info, err - } - info.shadowWALSize = frameAlign(fi.Size(), db.pageSize) - - // Exit if shadow WAL does not contain a full header. - if info.shadowWALSize < WALHeaderSize { - info.reason = "short shadow wal" - return info, nil - } - - // If shadow WAL is larger than real WAL then the WAL has been truncated - // so we cannot determine our last state. - if info.shadowWALSize > info.walSize { + // If shadow WAL position is larger than real WAL then the WAL has been + // truncated so we cannot determine our last state. + if db.pos.Offset > walSize { info.reason = "wal truncated by another process" return info, nil } // Compare WAL headers. Start a new shadow WAL if they are mismatched. 
- if hdr0, err := readWALHeader(db.WALPath()); err != nil { + if hdr, err := readWALHeader(db.WALPath()); err != nil { return info, fmt.Errorf("cannot read wal header: %w", err) - } else if hdr1, err := readWALHeader(info.shadowWALPath); err != nil { - return info, fmt.Errorf("cannot read shadow wal header: %w", err) - } else if !bytes.Equal(hdr0, hdr1) { - info.restart = !bytes.Equal(hdr0, hdr1) - } - - // If we only have a header then ensure header matches. - // Otherwise we need to start a new generation. - if info.shadowWALSize == WALHeaderSize && info.restart { - info.reason = "wal header only, mismatched" - return info, nil + } else if !bytes.Equal(hdr, db.hdr) { + info.restart = true } - // Verify last page synced still matches. - if info.shadowWALSize > WALHeaderSize { - offset := info.shadowWALSize - int64(db.pageSize+WALFrameHeaderSize) - if buf0, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { + // Verify last frame synced still matches. 
+ if db.pos.Offset > WALHeaderSize { + offset := db.pos.Offset - int64(db.pageSize+WALFrameHeaderSize) + if frame, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { return info, fmt.Errorf("cannot read last synced wal page: %w", err) - } else if buf1, err := readWALFileAt(info.shadowWALPath, offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { - return info, fmt.Errorf("cannot read last synced shadow wal page: %w", err) - } else if !bytes.Equal(buf0, buf1) { + } else if !bytes.Equal(frame, db.frame) { info.reason = "wal overwritten by another process" return info, nil } @@ -906,303 +1051,302 @@ func (db *DB) verify() (info syncInfo, err error) { } type syncInfo struct { - generation string // generation name - dbModTime time.Time // last modified date of real DB file - walSize int64 // size of real WAL file - walModTime time.Time // last modified date of real WAL file - shadowWALPath string // name of last shadow WAL file - shadowWALSize int64 // size of last shadow WAL file - restart bool // if true, real WAL header does not match shadow WAL - reason string // if non-blank, reason for sync failure + generation string // generation name + dbModTime time.Time // last modified date of real DB file + walModTime time.Time // last modified date of real WAL file + restart bool // if true, real WAL header does not match shadow WAL + reason string // if non-blank, reason for sync failure } -// syncWAL copies pending bytes from the real WAL to the shadow WAL. -func (db *DB) syncWAL(info syncInfo) (newSize int64, err error) { - // Copy WAL starting from end of shadow WAL. Exit if no new shadow WAL needed. - newSize, err = db.copyToShadowWAL(info.shadowWALPath) - if err != nil { - return newSize, fmt.Errorf("cannot copy to shadow wal: %w", err) - } else if !info.restart { - return newSize, nil // If no restart required, exit. - } - - // Parse index of current shadow WAL file. 
- dir, base := filepath.Split(info.shadowWALPath) - index, err := ParseWALPath(base) - if err != nil { - return 0, fmt.Errorf("cannot parse shadow wal filename: %s", base) - } - - // Start a new shadow WAL file with next index. - newShadowWALPath := filepath.Join(dir, FormatWALPath(index+1)) - newSize, err = db.initShadowWALFile(newShadowWALPath) - if err != nil { - return 0, fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) - } - return newSize, nil -} +func (db *DB) initShadowWALIndex(ctx context.Context, pos Pos) error { + assert(pos.Offset == 0, "must init shadow wal index with zero offset") -func (db *DB) initShadowWALFile(filename string) (int64, error) { hdr, err := readWALHeader(db.WALPath()) if err != nil { - return 0, fmt.Errorf("read header: %w", err) + return fmt.Errorf("read header: %w", err) } // Determine byte order for checksumming from header magic. - bo, err := headerByteOrder(hdr) + byteOrder, err := headerByteOrder(hdr) if err != nil { - return 0, err + return err } // Verify checksum. - s0 := binary.BigEndian.Uint32(hdr[24:]) - s1 := binary.BigEndian.Uint32(hdr[28:]) - if v0, v1 := Checksum(bo, 0, 0, hdr[:24]); v0 != s0 || v1 != s1 { - return 0, fmt.Errorf("invalid header checksum: (%x,%x) != (%x,%x)", v0, v1, s0, s1) + chksum0 := binary.BigEndian.Uint32(hdr[24:]) + chksum1 := binary.BigEndian.Uint32(hdr[28:]) + if v0, v1 := Checksum(byteOrder, 0, 0, hdr[:24]); v0 != chksum0 || v1 != chksum1 { + return fmt.Errorf("invalid header checksum: (%x,%x) != (%x,%x)", v0, v1, chksum0, chksum1) } - // Write header to new WAL shadow file. - mode := os.FileMode(0600) - if fi := db.fileInfo; fi != nil { - mode = fi.Mode() + // Compress header to LZ4. 
+ var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(hdr); err != nil { + return err + } else if err := zw.Close(); err != nil { + return err } - if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil { - return 0, err - } else if err := ioutil.WriteFile(filename, hdr, mode); err != nil { - return 0, err + + // Write header segment to shadow WAL & update position. + if err := db.writeWALSegment(ctx, pos, &buf); err != nil { + return fmt.Errorf("write shadow wal header: %w", err) } - uid, gid := internal.Fileinfo(db.fileInfo) - _ = os.Chown(filename, uid, gid) + pos.Offset += int64(len(hdr)) + db.pos = pos + + // Save header, salt & checksum to cache. + db.hdr = hdr + db.salt0 = binary.BigEndian.Uint32(hdr[16:]) + db.salt1 = binary.BigEndian.Uint32(hdr[20:]) + db.chksum0, db.chksum1 = chksum0, chksum1 + db.byteOrder = byteOrder // Copy as much shadow WAL as available. - newSize, err := db.copyToShadowWAL(filename) - if err != nil { - return 0, fmt.Errorf("cannot copy to new shadow wal: %w", err) + if err := db.copyToShadowWAL(ctx); err != nil { + return fmt.Errorf("cannot copy to new shadow wal: %w", err) } - return newSize, nil + return nil } -func (db *DB) copyToShadowWAL(filename string) (newSize int64, err error) { - Tracef("%s: copy-shadow: %s", db.path, filename) +func (db *DB) copyToShadowWAL(ctx context.Context) error { + pos := db.pos + assert(!pos.IsZero(), "zero pos for wal copy") r, err := os.Open(db.WALPath()) if err != nil { - return 0, err + return err } defer r.Close() - w, err := os.OpenFile(filename, os.O_RDWR, 0666) - if err != nil { - return 0, err - } - defer w.Close() - - fi, err := w.Stat() - if err != nil { - return 0, err - } - origSize := frameAlign(fi.Size(), db.pageSize) - - // Read shadow WAL header to determine byte order for checksum & salt. 
- hdr := make([]byte, WALHeaderSize) - if _, err := io.ReadFull(w, hdr); err != nil { - return 0, fmt.Errorf("read header: %w", err) - } - hsalt0 := binary.BigEndian.Uint32(hdr[16:]) - hsalt1 := binary.BigEndian.Uint32(hdr[20:]) + // Write to a temporary WAL segment file. + tempFilename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.tmp") + defer os.Remove(tempFilename) - bo, err := headerByteOrder(hdr) + f, err := internal.CreateFile(tempFilename, db.fileMode, db.uid, db.gid) if err != nil { - return 0, err - } - - // Read previous checksum. - chksum0, chksum1, err := readLastChecksumFrom(w, db.pageSize) - if err != nil { - return 0, fmt.Errorf("last checksum: %w", err) + return err } + defer f.Close() // Seek to correct position on real wal. - if _, err := r.Seek(origSize, io.SeekStart); err != nil { - return 0, fmt.Errorf("real wal seek: %w", err) - } else if _, err := w.Seek(origSize, io.SeekStart); err != nil { - return 0, fmt.Errorf("shadow wal seek: %w", err) + if _, err := r.Seek(pos.Offset, io.SeekStart); err != nil { + return fmt.Errorf("real wal seek: %w", err) } - // Read through WAL from last position to find the page of the last - // committed transaction. + // The high water mark (HWM) tracks the position & checksum of the position + // of the last committed transaction frame. + hwm := struct { + pos Pos + chksum0 uint32 + chksum1 uint32 + frame []byte + }{db.pos, db.chksum0, db.chksum1, make([]byte, db.pageSize+WALFrameHeaderSize)} + + // Copy from last position in real WAL to the last committed transaction. frame := make([]byte, db.pageSize+WALFrameHeaderSize) - var buf bytes.Buffer - offset := origSize - lastCommitSize := origSize + chksum0, chksum1 := db.chksum0, db.chksum1 for { // Read next page from WAL file. 
if _, err := io.ReadFull(r, frame); err == io.EOF || err == io.ErrUnexpectedEOF { - Tracef("%s: copy-shadow: break %s @ %d; err=%s", db.path, filename, offset, err) break // end of file or partial page } else if err != nil { - return 0, fmt.Errorf("read wal: %w", err) + return fmt.Errorf("read wal: %w", err) } // Read frame salt & compare to header salt. Stop reading on mismatch. salt0 := binary.BigEndian.Uint32(frame[8:]) salt1 := binary.BigEndian.Uint32(frame[12:]) - if salt0 != hsalt0 || salt1 != hsalt1 { - Tracef("%s: copy-shadow: break: salt mismatch", db.path) + if salt0 != db.salt0 || salt1 != db.salt1 { break } // Verify checksum of page is valid. fchksum0 := binary.BigEndian.Uint32(frame[16:]) fchksum1 := binary.BigEndian.Uint32(frame[20:]) - chksum0, chksum1 = Checksum(bo, chksum0, chksum1, frame[:8]) // frame header - chksum0, chksum1 = Checksum(bo, chksum0, chksum1, frame[24:]) // frame data + chksum0, chksum1 = Checksum(db.byteOrder, chksum0, chksum1, frame[:8]) // frame header + chksum0, chksum1 = Checksum(db.byteOrder, chksum0, chksum1, frame[24:]) // frame data if chksum0 != fchksum0 || chksum1 != fchksum1 { - Tracef("%s: copy shadow: checksum mismatch, skipping: offset=%d (%x,%x) != (%x,%x)", db.path, offset, chksum0, chksum1, fchksum0, fchksum1) break } // Add page to the new size of the shadow WAL. - buf.Write(frame) + if _, err := f.Write(frame); err != nil { + return fmt.Errorf("write temp shadow wal segment: %w", err) + } - Tracef("%s: copy-shadow: ok %s offset=%d salt=%x %x", db.path, filename, offset, salt0, salt1) - offset += int64(len(frame)) + pos.Offset += int64(len(frame)) // Flush to shadow WAL if commit record. newDBSize := binary.BigEndian.Uint32(frame[4:]) if newDBSize != 0 { - if _, err := buf.WriteTo(w); err != nil { - return 0, fmt.Errorf("write shadow wal: %w", err) - } - buf.Reset() - lastCommitSize = offset + hwm.pos = pos + hwm.chksum0, hwm.chksum1 = chksum0, chksum1 + copy(hwm.frame, frame) } } - // Sync & close. 
- if err := w.Sync(); err != nil { - return 0, err - } else if err := w.Close(); err != nil { - return 0, err + // If no WAL writes found, exit. + if db.pos == hwm.pos { + return nil + } + + walByteN := hwm.pos.Offset - db.pos.Offset + + // Move to beginning of temporary file. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("temp file seek: %w", err) + } + + // Copy temporary file to a pipe while compressing the data. + // Only read up to the number of bytes from the original position to the HWM. + pr, pw := io.Pipe() + go func() { + zw := lz4.NewWriter(pw) + if _, err := io.Copy(zw, &io.LimitedReader{R: f, N: walByteN}); err != nil { + _ = pw.CloseWithError(err) + } else if err := zw.Close(); err != nil { + _ = pw.CloseWithError(err) + } + _ = pw.Close() + }() + + // Write a new, compressed segment via pipe. + if err := db.writeWALSegment(ctx, db.pos, pr); err != nil { + return fmt.Errorf("write wal segment: pos=%s err=%w", db.pos, err) + } + + // Update the position & checksum on success. + db.pos = hwm.pos + db.chksum0, db.chksum1 = hwm.chksum0, hwm.chksum1 + db.frame = hwm.frame + + // Close & remove temporary file. + if err := f.Close(); err != nil { + return err + } else if err := os.Remove(tempFilename); err != nil { + return err } // Track total number of bytes written to WAL. - db.totalWALBytesCounter.Add(float64(lastCommitSize - origSize)) + db.totalWALBytesCounter.Add(float64(walByteN)) - return lastCommitSize, nil + return nil } -// ShadowWALReader opens a reader for a shadow WAL file at a given position. -// If the reader is at the end of the file, it attempts to return the next file. -// -// The caller should check Pos() & Size() on the returned reader to check offset. -func (db *DB) ShadowWALReader(pos Pos) (r *ShadowWALReader, err error) { - // Fetch reader for the requested position. Return if it has data. - r, err = db.shadowWALReader(pos) +// verifyLastShadowFrame re-reads the last frame read during the shadow copy. 
+// This ensures that the frame has not been overrun after a checkpoint occurs +// but before the new write lock has been obtained to initialize the new wal index. +func (db *DB) verifyLastShadowFrame(ctx context.Context) error { + // Skip if we don't have a previous frame to verify. + if db.frame == nil { + return nil + } + + r, err := os.Open(db.WALPath()) if err != nil { - return nil, err - } else if r.N() > 0 { - return r, nil - } else if err := r.Close(); err != nil { // no data, close, try next - return nil, err + return err } + defer r.Close() - // Otherwise attempt to read the start of the next WAL file. - pos.Index, pos.Offset = pos.Index+1, 0 + // Seek to position of where the last frame was read. + buf := make([]byte, len(db.frame)) + if _, err := r.Seek(db.pos.Offset-int64(len(db.frame)), io.SeekStart); err != nil { + return fmt.Errorf("seek to last frame: %w", err) + } else if _, err := io.ReadFull(r, buf); err != nil { + return fmt.Errorf("read last frame: %w", err) + } - r, err = db.shadowWALReader(pos) - if os.IsNotExist(err) { - return nil, io.EOF + // Return a marker error if frames do not match. + if !bytes.Equal(db.frame, buf) { + return errRestartGeneration } - return r, err + + return nil } -// shadowWALReader opens a file reader for a shadow WAL file at a given position. -func (db *DB) shadowWALReader(pos Pos) (r *ShadowWALReader, err error) { - filename := db.ShadowWALPath(pos.Generation, pos.Index) +// WALSegmentReader returns a reader for a section of WAL data at the given position. +// Returns os.ErrNotExist if no matching index/offset is found. 
+func (db *DB) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { + if pos.Generation == "" { + return nil, fmt.Errorf("generation required") + } + return os.Open(filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4")) +} - f, err := os.Open(filename) - if err != nil { - return nil, err +// writeWALSegment writes LZ4 compressed data from rd into a file on disk. +func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error { + if pos.Generation == "" { + return fmt.Errorf("generation required") } + filename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4") - // Ensure file is closed if any error occurs. - defer func() { - if err != nil { - f.Close() - } - }() + // Ensure parent directory exists. + if err := internal.MkdirAll(filepath.Dir(filename), db.dirMode, db.uid, db.gid); err != nil { + return err + } - // Fetch frame-aligned file size and ensure requested offset is not past EOF. - fi, err := f.Stat() + // Write WAL segment to temporary file next to destination path. + f, err := internal.CreateFile(filename+".tmp", db.fileMode, db.uid, db.gid) if err != nil { - return nil, err + return err } + defer f.Close() - fileSize := frameAlign(fi.Size(), db.pageSize) - if pos.Offset > fileSize { - return nil, fmt.Errorf("wal reader offset too high: %d > %d", pos.Offset, fi.Size()) + if _, err := io.Copy(f, rd); err != nil { + return err + } else if err := f.Sync(); err != nil { + return err + } else if err := f.Close(); err != nil { + return err } - // Move file handle to offset position. - if _, err := f.Seek(pos.Offset, io.SeekStart); err != nil { - return nil, err + // Move WAL segment to final path when it has been written & synced to disk. 
+ if err := os.Rename(filename+".tmp", filename); err != nil { + return err } - return &ShadowWALReader{ - f: f, - n: fileSize - pos.Offset, - pos: pos, - }, nil -} - -// frameAlign returns a frame-aligned offset. -// Returns zero if offset is less than the WAL header size. -func frameAlign(offset int64, pageSize int) int64 { - assert(offset >= 0, "frameAlign(): offset must be non-negative") - assert(pageSize >= 0, "frameAlign(): page size must be non-negative") - - if offset < WALHeaderSize { - return 0 + // Write position to file so other processes can read it. + if err := db.writePositionFile(pos); err != nil { + return fmt.Errorf("write position file: %w", err) } - frameSize := WALFrameHeaderSize + int64(pageSize) - frameN := (offset - WALHeaderSize) / frameSize - return (frameN * frameSize) + WALHeaderSize + return nil } -// ShadowWALReader represents a reader for a shadow WAL file that tracks WAL position. -type ShadowWALReader struct { - f *os.File - n int64 - pos Pos +// writePositionFile writes pos as the current position. +func (db *DB) writePositionFile(pos Pos) error { + return internal.WriteFile(db.PositionPath(), []byte(pos.String()+"\n"), db.fileMode, db.uid, db.gid) } -// Name returns the filename of the underlying file. -func (r *ShadowWALReader) Name() string { return r.f.Name() } +// WALSegments returns an iterator over all available WAL files for a generation. +func (db *DB) WALSegments(ctx context.Context, generation string) (*FileWALSegmentIterator, error) { + db.mu.Lock() + defer db.mu.Unlock() + return db.walSegments(ctx, generation) +} -// Close closes the underlying WAL file handle. 
-func (r *ShadowWALReader) Close() error { return r.f.Close() } +func (db *DB) walSegments(ctx context.Context, generation string) (*FileWALSegmentIterator, error) { + ents, err := os.ReadDir(db.ShadowWALDir(generation)) + if os.IsNotExist(err) { + return NewFileWALSegmentIterator(db.ShadowWALDir(generation), generation, nil), nil + } else if err != nil { + return nil, err + } -// N returns the remaining bytes in the reader. -func (r *ShadowWALReader) N() int64 { return r.n } + // Iterate over every file and convert to metadata. + indexes := make([]int, 0, len(ents)) + for _, ent := range ents { + index, err := ParseIndex(ent.Name()) + if err != nil { + continue + } + indexes = append(indexes, index) + } -// Pos returns the current WAL position. -func (r *ShadowWALReader) Pos() Pos { return r.pos } + sort.Ints(indexes) -// Read reads bytes into p, updates the position, and returns the bytes read. -// Returns io.EOF at the end of the available section of the WAL. -func (r *ShadowWALReader) Read(p []byte) (n int, err error) { - if r.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > r.n { - p = p[0:r.n] - } - n, err = r.f.Read(p) - r.n -= int64(n) - r.pos.Offset += int64(n) - return n, err + return NewFileWALSegmentIterator(db.ShadowWALDir(generation), generation, indexes), nil } // SQLite WAL constants @@ -1211,26 +1355,6 @@ const ( WALFrameHeaderChecksumOffset = 16 ) -func readLastChecksumFrom(f *os.File, pageSize int) (uint32, uint32, error) { - // Determine the byte offset of the checksum for the header (if no pages - // exist) or for the last page (if at least one page exists). - offset := int64(WALHeaderChecksumOffset) - if fi, err := f.Stat(); err != nil { - return 0, 0, err - } else if sz := frameAlign(fi.Size(), pageSize); fi.Size() > WALHeaderSize { - offset = sz - int64(pageSize) - WALFrameHeaderSize + WALFrameHeaderChecksumOffset - } - - // Read big endian checksum. 
- b := make([]byte, 8) - if n, err := f.ReadAt(b, offset); err != nil { - return 0, 0, err - } else if n != len(b) { - return 0, 0, io.ErrUnexpectedEOF - } - return binary.BigEndian.Uint32(b[0:]), binary.BigEndian.Uint32(b[4:]), nil -} - // Checkpoint performs a checkpoint on the WAL file. func (db *DB) Checkpoint(ctx context.Context, mode string) (err error) { db.mu.Lock() @@ -1246,11 +1370,6 @@ func (db *DB) Checkpoint(ctx context.Context, mode string) (err error) { // checkpointAndInit performs a checkpoint on the WAL file and initializes a // new shadow WAL file. func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { - shadowWALPath, err := db.CurrentShadowWALPath(generation) - if err != nil { - return err - } - // Read WAL header before checkpoint to check if it has been restarted. hdr, err := readWALHeader(db.WALPath()) if err != nil { @@ -1258,7 +1377,7 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { } // Copy shadow WAL before checkpoint to copy as much as possible. - if _, err := db.copyToShadowWAL(shadowWALPath); err != nil { + if err := db.copyToShadowWAL(ctx); err != nil { return fmt.Errorf("cannot copy to end of shadow wal before checkpoint: %w", err) } @@ -1292,21 +1411,25 @@ func (db *DB) checkpoint(ctx context.Context, generation, mode string) error { return fmt.Errorf("_litestream_lock: %w", err) } - // Copy the end of the previous WAL before starting a new shadow WAL. - if _, err := db.copyToShadowWAL(shadowWALPath); err != nil { - return fmt.Errorf("cannot copy to end of shadow wal: %w", err) + // Verify we can re-read the last frame copied to the shadow WAL. + // This ensures that another transaction has not overrun the WAL past where + // our previous copy was which would overwrite any additional unread + // frames between the checkpoint & the new write lock. + // + // This only occurs with high load and a short sync frequency so it is rare. 
+ if err := db.verifyLastShadowFrame(ctx); err != nil { + return fmt.Errorf("cannot verify last frame copied from shadow wal: %w", err) } - // Parse index of current shadow WAL file. - index, err := ParseWALPath(shadowWALPath) - if err != nil { - return fmt.Errorf("cannot parse shadow wal filename: %s", shadowWALPath) + // Copy the end of the previous WAL before starting a new shadow WAL. + if err := db.copyToShadowWAL(ctx); err != nil { + return fmt.Errorf("cannot copy to end of shadow wal: %w", err) } // Start a new shadow WAL file with next index. - newShadowWALPath := filepath.Join(filepath.Dir(shadowWALPath), FormatWALPath(index+1)) - if _, err := db.initShadowWALFile(newShadowWALPath); err != nil { - return fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) + pos := Pos{Generation: db.pos.Generation, Index: db.pos.Index + 1} + if err := db.initShadowWALIndex(ctx, pos); err != nil { + return fmt.Errorf("cannot init shadow wal file: pos=%s err=%w", pos, err) } // Release write lock before checkpointing & exiting. @@ -1352,78 +1475,68 @@ func (db *DB) execCheckpoint(mode string) (err error) { if err := db.db.QueryRow(rawsql).Scan(&row[0], &row[1], &row[2]); err != nil { return err } - Tracef("%s: checkpoint: mode=%v (%d,%d,%d)", db.path, mode, row[0], row[1], row[2]) + db.Logger.Printf("checkpoint(%s): [%d,%d,%d]", mode, row[0], row[1], row[2]) + + // Clear last read frame if we are truncating. + if mode == CheckpointModeTruncate { + db.frame = nil + } // Reacquire the read lock immediately after the checkpoint. if err := db.acquireReadLock(); err != nil { - return fmt.Errorf("release read lock: %w", err) + return fmt.Errorf("reacquire read lock: %w", err) } return nil } -// monitor runs in a separate goroutine and monitors the database & WAL. -func (db *DB) monitor() { - ticker := time.NewTicker(db.MonitorInterval) - defer ticker.Stop() +// monitor runs in a separate goroutine and monitors the local database & WAL. 
+func (db *DB) monitor(ctx context.Context) error { + var timer *time.Timer + if db.MonitorDelayInterval > 0 { + timer = time.NewTimer(db.MonitorDelayInterval) + defer timer.Stop() + } for { - // Wait for ticker or context close. + // Wait for a file change notification from the file system. select { - case <-db.ctx.Done(): - return - case <-ticker.C: + case <-ctx.Done(): + return nil + case <-db.notifyCh: } - // Sync the database to the shadow WAL. - if err := db.Sync(db.ctx); err != nil && !errors.Is(err, context.Canceled) { - log.Printf("%s: sync error: %s", db.path, err) - } - } -} - -// CalcRestoreTarget returns a replica & generation to restore from based on opt criteria. -func (db *DB) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (*Replica, string, error) { - var target struct { - replica *Replica - generation string - updatedAt time.Time - } - - for _, r := range db.Replicas { - // Skip replica if it does not match filter. - if opt.ReplicaName != "" && r.Name() != opt.ReplicaName { - continue + // Wait for small delay before processing changes. + if timer != nil { + timer.Reset(db.MonitorDelayInterval) + <-timer.C } - generation, updatedAt, err := r.CalcRestoreTarget(ctx, opt) - if err != nil { - return nil, "", err + // Clear any additional change notifications that occurred during delay. + select { + case <-db.notifyCh: + default: } - // Use the latest replica if we have multiple candidates. - if !updatedAt.After(target.updatedAt) { - continue + if err := db.Sync(ctx); err != nil && !errors.Is(err, context.Canceled) { + db.Logger.Printf("sync error: %s", err) } - - target.replica, target.generation, target.updatedAt = r, generation, updatedAt } - return target.replica, target.generation, nil } -// applyWAL performs a truncating checkpoint on the given database. -func applyWAL(ctx context.Context, index int, dbPath string) error { +// ApplyWAL performs a truncating checkpoint on the given database. 
+func ApplyWAL(ctx context.Context, dbPath, walPath string) error { // Copy WAL file from it's staging path to the correct "-wal" location. - if err := os.Rename(fmt.Sprintf("%s-%08x-wal", dbPath, index), dbPath+"-wal"); err != nil { + if err := os.Rename(walPath, dbPath+"-wal"); err != nil { return err } // Open SQLite database and force a truncating checkpoint. - d, err := sql.Open("sqlite3", dbPath) + d, err := sql.Open("litestream-sqlite3", dbPath) if err != nil { return err } - defer d.Close() + defer func() { _ = d.Close() }() var row [3]int if err := d.QueryRow(`PRAGMA wal_checkpoint(TRUNCATE);`).Scan(&row[0], &row[1], &row[2]); err != nil { @@ -1434,92 +1547,49 @@ func applyWAL(ctx context.Context, index int, dbPath string) error { return d.Close() } -// CRC64 returns a CRC-64 ISO checksum of the database and its current position. -// -// This function obtains a read lock so it prevents syncs from occurring until -// the operation is complete. The database will still be usable but it will be -// unable to checkpoint during this time. -// -// If dst is set, the database file is copied to that location before checksum. -func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) { - db.mu.Lock() - defer db.mu.Unlock() - - if err := db.init(); err != nil { - return 0, Pos{}, err - } else if db.db == nil { - return 0, Pos{}, os.ErrNotExist - } - - generation, err := db.CurrentGeneration() - if err != nil { - return 0, Pos{}, fmt.Errorf("cannot find current generation: %w", err) - } else if generation == "" { - return 0, Pos{}, fmt.Errorf("no current generation") - } - - // Force a RESTART checkpoint to ensure the database is at the start of the WAL. - if err := db.checkpoint(ctx, generation, CheckpointModeRestart); err != nil { - return 0, Pos{}, err - } +// ReadWALFields iterates over the header & frames in the WAL data in r. +// Returns salt, checksum, byte order & the last frame. 
WAL data must start +// from the beginning of the WAL header and must end on either the WAL header +// or at the end of a WAL frame. +func ReadWALFields(r io.Reader, pageSize int) (salt0, salt1, chksum0, chksum1 uint32, byteOrder binary.ByteOrder, hdr, frame []byte, err error) { + // Read header. + hdr = make([]byte, WALHeaderSize) + if _, err := io.ReadFull(r, hdr); err != nil { + return 0, 0, 0, 0, nil, nil, nil, fmt.Errorf("short wal header: %w", err) + } + + // Save salt, initial checksum, & byte order. + salt0 = binary.BigEndian.Uint32(hdr[16:]) + salt1 = binary.BigEndian.Uint32(hdr[20:]) + chksum0 = binary.BigEndian.Uint32(hdr[24:]) + chksum1 = binary.BigEndian.Uint32(hdr[28:]) + if byteOrder, err = headerByteOrder(hdr); err != nil { + return 0, 0, 0, 0, nil, nil, nil, err + } + + // Iterate over each page in the WAL and save the checksum. + frame = make([]byte, pageSize+WALFrameHeaderSize) + var hasFrame bool + for { + // Read next page from WAL file. + if n, err := io.ReadFull(r, frame); err == io.EOF { + break // end of WAL file + } else if err != nil { + return 0, 0, 0, 0, nil, nil, nil, fmt.Errorf("short wal frame (n=%d): %w", n, err) + } - // Obtain current position. Clear the offset since we are only reading the - // DB and not applying the current WAL. - pos, err := db.Pos() - if err != nil { - return 0, pos, err + // Update checksum on each successful frame. + hasFrame = true + chksum0 = binary.BigEndian.Uint32(frame[16:]) + chksum1 = binary.BigEndian.Uint32(frame[20:]) } - pos.Offset = 0 - // Seek to the beginning of the db file descriptor and checksum whole file. - h := crc64.New(crc64.MakeTable(crc64.ISO)) - if _, err := db.f.Seek(0, io.SeekStart); err != nil { - return 0, pos, err - } else if _, err := io.Copy(h, db.f); err != nil { - return 0, pos, err + // Clear frame if none were successfully read. 
+ if !hasFrame { + frame = nil } - return h.Sum64(), pos, nil -} - -// DefaultRestoreParallelism is the default parallelism when downloading WAL files. -const DefaultRestoreParallelism = 8 - -// RestoreOptions represents options for DB.Restore(). -type RestoreOptions struct { - // Target path to restore into. - // If blank, the original DB path is used. - OutputPath string - // Specific replica to restore from. - // If blank, all replicas are considered. - ReplicaName string - - // Specific generation to restore from. - // If blank, all generations considered. - Generation string - - // Specific index to restore from. - // Set to math.MaxInt32 to ignore index. - Index int - - // Point-in-time to restore database. - // If zero, database restore to most recent state available. - Timestamp time.Time - - // Specifies how many WAL files are downloaded in parallel during restore. - Parallelism int - - // Logging settings. - Logger *log.Logger - Verbose bool -} - -// NewRestoreOptions returns a new instance of RestoreOptions with defaults. -func NewRestoreOptions() RestoreOptions { - return RestoreOptions{ - Index: math.MaxInt32, - Parallelism: DefaultRestoreParallelism, - } + return salt0, salt1, chksum0, chksum1, byteOrder, hdr, frame, nil } // Database metrics. @@ -1591,3 +1661,16 @@ func headerByteOrder(hdr []byte) (binary.ByteOrder, error) { return nil, fmt.Errorf("invalid wal header magic: %x", magic) } } + +// logPrefixPath returns the path to be used for logging. +// The path is reduced to its base if it appears to be a temporary test path. +func logPrefixPath(path string) string { + if strings.Contains(path, "TestCmd") { + return filepath.Base(path) + } + return path +} + +// A marker error to indicate that a restart checkpoint could not verify +// continuity between WAL indices and a new generation should be started. 
+var errRestartGeneration = errors.New("restart generation") diff --git a/db_bsd.go b/db_bsd.go new file mode 100644 index 00000000..9c0c6bba --- /dev/null +++ b/db_bsd.go @@ -0,0 +1,21 @@ +//go:build !linux + +package litestream + +import ( + "io" + "os" +) + +// WithFile executes fn with a file handle for the main database file. +// On Linux, this is a unique file handle for each call. On non-Linux +// systems, the file handle is shared because of lock semantics. +func (db *DB) WithFile(fn func(f *os.File) error) error { + db.mu.Lock() + defer db.mu.Unlock() + + if _, err := db.f.Seek(0, io.SeekStart); err != nil { + return err + } + return fn(db.f) +} diff --git a/db_linux.go b/db_linux.go new file mode 100644 index 00000000..b6691090 --- /dev/null +++ b/db_linux.go @@ -0,0 +1,18 @@ +//go:build linux + +package litestream + +import "os" + +// WithFile executes fn with a file handle for the main database file. +// On Linux, this is a unique file handle for each call. On non-Linux +// systems, the file handle is shared because of lock semantics. 
+func (db *DB) WithFile(fn func(f *os.File) error) error { + f, err := os.Open(db.path) + if err != nil { + return err + } + defer f.Close() + + return fn(f) +} diff --git a/db_test.go b/db_test.go index b7eb54b0..6f54ea8d 100644 --- a/db_test.go +++ b/db_test.go @@ -1,9 +1,10 @@ package litestream_test import ( + "bytes" "context" "database/sql" - "io/ioutil" + "encoding/binary" "os" "path/filepath" "strings" @@ -30,13 +31,13 @@ func TestDB_WALPath(t *testing.T) { func TestDB_MetaPath(t *testing.T) { t.Run("Absolute", func(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want { + if got, want := db.MetaPath(), `/tmp/db-litestream`; got != want { t.Fatalf("MetaPath()=%v, want %v", got, want) } }) t.Run("Relative", func(t *testing.T) { db := litestream.NewDB("db") - if got, want := db.MetaPath(), `.db-litestream`; got != want { + if got, want := db.MetaPath(), `db-litestream`; got != want { t.Fatalf("MetaPath()=%v, want %v", got, want) } }) @@ -44,32 +45,25 @@ func TestDB_MetaPath(t *testing.T) { func TestDB_GenerationNamePath(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want { + if got, want := db.GenerationNamePath(), `/tmp/db-litestream/generation`; got != want { t.Fatalf("GenerationNamePath()=%v, want %v", got, want) } } func TestDB_GenerationPath(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want { + if got, want := db.GenerationPath("xxxx"), `/tmp/db-litestream/generations/xxxx`; got != want { t.Fatalf("GenerationPath()=%v, want %v", got, want) } } func TestDB_ShadowWALDir(t *testing.T) { db := litestream.NewDB("/tmp/db") - if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want { + if got, want := db.ShadowWALDir("xxxx"), `/tmp/db-litestream/generations/xxxx/wal`; got != 
want { t.Fatalf("ShadowWALDir()=%v, want %v", got, want) } } -func TestDB_ShadowWALPath(t *testing.T) { - db := litestream.NewDB("/tmp/db") - if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want { - t.Fatalf("ShadowWALPath()=%v, want %v", got, want) - } -} - // Ensure we can check the last modified time of the real database and its WAL. func TestDB_UpdatedAt(t *testing.T) { t.Run("ErrNotExist", func(t *testing.T) { @@ -118,51 +112,6 @@ func TestDB_UpdatedAt(t *testing.T) { }) } -// Ensure we can compute a checksum on the real database. -func TestDB_CRC64(t *testing.T) { - t.Run("ErrNotExist", func(t *testing.T) { - db := MustOpenDB(t) - defer MustCloseDB(t, db) - if _, _, err := db.CRC64(context.Background()); !os.IsNotExist(err) { - t.Fatalf("unexpected error: %#v", err) - } - }) - - t.Run("DB", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - chksum0, _, err := db.CRC64(context.Background()) - if err != nil { - t.Fatal(err) - } - - // Issue change that is applied to the WAL. Checksum should not change. - if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil { - t.Fatal(err) - } else if chksum1, _, err := db.CRC64(context.Background()); err != nil { - t.Fatal(err) - } else if chksum0 == chksum1 { - t.Fatal("expected different checksum event after WAL change") - } - - // Checkpoint change into database. Checksum should change. - if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil { - t.Fatal(err) - } - - if chksum2, _, err := db.CRC64(context.Background()); err != nil { - t.Fatal(err) - } else if chksum0 == chksum2 { - t.Fatal("expected different checksums after checkpoint") - } - }) -} - // Ensure we can sync the real WAL to the shadow WAL. func TestDB_Sync(t *testing.T) { // Ensure sync is skipped if no database exists. 
@@ -195,9 +144,7 @@ func TestDB_Sync(t *testing.T) { } // Ensure position now available. - if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos.Generation == "" { + if pos := db.Pos(); pos.Generation == "" { t.Fatal("expected generation") } else if got, want := pos.Index, 0; got != want { t.Fatalf("pos.Index=%v, want %v", got, want) @@ -221,10 +168,7 @@ func TestDB_Sync(t *testing.T) { t.Fatal(err) } - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Insert into table. if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil { @@ -234,9 +178,7 @@ func TestDB_Sync(t *testing.T) { // Sync to ensure position moves forward one page. if err := db.Sync(context.Background()); err != nil { t.Fatal(err) - } else if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation != pos1.Generation { + } else if pos1 := db.Pos(); pos0.Generation != pos1.Generation { t.Fatal("expected the same generation") } else if got, want := pos1.Index, pos0.Index; got != want { t.Fatalf("Index=%v, want %v", got, want) @@ -256,10 +198,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Checkpoint & fully close which should close WAL file. if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil { @@ -270,8 +209,8 @@ func TestDB_Sync(t *testing.T) { t.Fatal(err) } - // Verify WAL does not exist. - if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) { + // Remove WAL file. + if err := os.Remove(db.WALPath()); err != nil { t.Fatal(err) } @@ -285,9 +224,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. 
- if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { + if pos1 := db.Pos(); pos0.Generation == pos1.Generation { t.Fatal("expected new generation after truncation") } }) @@ -308,10 +245,7 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() // Fully close which should close WAL file. if err := db.Close(); err != nil { @@ -344,190 +278,98 @@ func TestDB_Sync(t *testing.T) { } // Obtain initial position. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { + if pos1 := db.Pos(); pos0.Generation == pos1.Generation { t.Fatal("expected new generation after truncation") } }) - // Ensure DB can handle a mismatched header-only and start new generation. - t.Run("WALHeaderMismatch", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) + // TODO: Fix test to check for header mismatch + /* + // Ensure DB can handle a mismatched header-only and start new generation. + t.Run("WALHeaderMismatch", func(t *testing.T) { + db, sqldb := MustOpenDBs(t) + defer MustCloseDBs(t, db, sqldb) - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Grab initial position & close. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if err := db.Close(); err != nil { - t.Fatal(err) - } - - // Read existing file, update header checksum, and write back only header - // to simulate a header with a mismatched checksum. 
- shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index) - if buf, err := ioutil.ReadFile(shadowWALPath); err != nil { - t.Fatal(err) - } else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil { - t.Fatal(err) - } - - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Verify a new generation was started. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } - }) - - // Ensure DB can handle partial shadow WAL header write. - t.Run("PartialShadowWALHeader", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } - - // Close & truncate shadow WAL to simulate a partial header write. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil { - t.Fatal(err) - } - - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Verify a new generation was started. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } - }) - - // Ensure DB can handle partial shadow WAL writes. 
- t.Run("PartialShadowWALFrame", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } - - // Obtain current shadow WAL size. - fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)) - if err != nil { - t.Fatal(err) - } - - // Close & truncate shadow WAL to simulate a partial frame write. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil { - t.Fatal(err) - } - - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Execute a query to force a write to the WAL and then sync. + if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { + t.Fatal(err) + } else if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Verify same generation is kept. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos1, pos0; got != want { - t.Fatalf("Pos()=%s want %s", got, want) - } + // Grab initial position & close. + pos0 := db.Pos() + if err := db.Close(); err != nil { + t.Fatal(err) + } - // Ensure shadow WAL has recovered. - if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { - t.Fatal(err) - } else if got, want := fi0.Size(), fi.Size(); got != want { - t.Fatalf("Size()=%v, want %v", got, want) - } - }) + // Read existing file, update header checksum, and write back only header + // to simulate a header with a mismatched checksum. 
+ shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index) + if buf, err := os.ReadFile(shadowWALPath); err != nil { + t.Fatal(err) + } else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil { + t.Fatal(err) + } - // Ensure DB can handle a generation directory with a missing shadow WAL. - t.Run("NoShadowWAL", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) + // Reopen managed database & ensure sync will still work. + db = MustOpenDBAt(t, db.Path()) + defer MustCloseDB(t, db) + if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Execute a query to force a write to the WAL and then sync. - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } else if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Verify a new generation was started. + if pos1, err := db.Pos(); err != nil { + t.Fatal(err) + } else if pos0.Generation == pos1.Generation { + t.Fatal("expected new generation") + } + }) + */ + + // TODO: Fix test for segmented shadow WAL. + /* + // Ensure DB can handle a generation directory with a missing shadow WAL. + t.Run("NoShadowWAL", func(t *testing.T) { + db, sqldb := MustOpenDBs(t) + defer MustCloseDBs(t, db, sqldb) + + // Execute a query to force a write to the WAL and then sync. + if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { + t.Fatal(err) + } else if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } + pos0 := db.Pos() - // Close & delete shadow WAL to simulate dir created but not WAL. - if err := db.Close(); err != nil { - t.Fatal(err) - } else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { - t.Fatal(err) - } + // Close & delete shadow WAL to simulate dir created but not WAL. 
+ if err := db.Close(); err != nil { + t.Fatal(err) + } else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil { + t.Fatal(err) + } - // Reopen managed database & ensure sync will still work. - db = MustOpenDBAt(t, db.Path()) - defer MustCloseDB(t, db) - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } + // Reopen managed database & ensure sync will still work. + db = MustOpenDBAt(t, db.Path()) + defer MustCloseDB(t, db) + if err := db.Sync(context.Background()); err != nil { + t.Fatal(err) + } - // Verify new generation created but index/offset the same. - if pos1, err := db.Pos(); err != nil { - t.Fatal(err) - } else if pos0.Generation == pos1.Generation { - t.Fatal("expected new generation") - } else if got, want := pos1.Index, pos0.Index; got != want { - t.Fatalf("Index=%v want %v", got, want) - } else if got, want := pos1.Offset, pos0.Offset; got != want { - t.Fatalf("Offset=%v want %v", got, want) - } - }) + // Verify new generation created but index/offset the same. + if pos1, err := db.Pos(); err != nil { + t.Fatal(err) + } else if pos0.Generation == pos1.Generation { + t.Fatal("expected new generation") + } else if got, want := pos1.Index, pos0.Index; got != want { + t.Fatalf("Index=%v want %v", got, want) + } else if got, want := pos1.Offset, pos0.Offset; got != want { + t.Fatalf("Offset=%v want %v", got, want) + } + }) + */ // Ensure DB checkpoints after minimum number of pages. t.Run("MinCheckpointPageN", func(t *testing.T) { @@ -554,9 +396,7 @@ func TestDB_Sync(t *testing.T) { } // Ensure position is now on the second index. - if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 1; got != want { + if got, want := db.Pos().Index, 1; got != want { t.Fatalf("Index=%v, want %v", got, want) } }) @@ -584,14 +424,73 @@ func TestDB_Sync(t *testing.T) { } // Ensure position is now on the second index. 
- if pos, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 1; got != want { + if got, want := db.Pos().Index, 1; got != want { t.Fatalf("Index=%v, want %v", got, want) } }) } +func TestReadWALFields(t *testing.T) { + b, err := os.ReadFile("testdata/read-wal-fields/ok") + if err != nil { + t.Fatal(err) + } + + t.Run("OK", func(t *testing.T) { + if salt0, salt1, chksum0, chksum1, byteOrder, _, frame, err := litestream.ReadWALFields(bytes.NewReader(b), 4096); err != nil { + t.Fatal(err) + } else if got, want := salt0, uint32(0x4F7598FD); got != want { + t.Fatalf("salt0=%x, want %x", got, want) + } else if got, want := salt1, uint32(0x875FFD5B); got != want { + t.Fatalf("salt1=%x, want %x", got, want) + } else if got, want := chksum0, uint32(0x2081CAF7); got != want { + t.Fatalf("chksum0=%x, want %x", got, want) + } else if got, want := chksum1, uint32(0x31093CD3); got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if got, want := byteOrder, binary.LittleEndian; got != want { + t.Fatalf("byteOrder=%v, want %v", got, want) + } else if !bytes.Equal(frame, b[8272:]) { + t.Fatal("last frame mismatch") + } + }) + + t.Run("HeaderOnly", func(t *testing.T) { + if salt0, salt1, chksum0, chksum1, byteOrder, _, frame, err := litestream.ReadWALFields(bytes.NewReader(b[:32]), 4096); err != nil { + t.Fatal(err) + } else if got, want := salt0, uint32(0x4F7598FD); got != want { + t.Fatalf("salt0=%x, want %x", got, want) + } else if got, want := salt1, uint32(0x875FFD5B); got != want { + t.Fatalf("salt1=%x, want %x", got, want) + } else if got, want := chksum0, uint32(0xD27F7862); got != want { + t.Fatalf("chksum0=%x, want %x", got, want) + } else if got, want := chksum1, uint32(0xE664AF8E); got != want { + t.Fatalf("chksum1=%x, want %x", got, want) + } else if got, want := byteOrder, binary.LittleEndian; got != want { + t.Fatalf("byteOrder=%v, want %v", got, want) + } else if frame != nil { + t.Fatal("expected no frame") + } + }) + + 
t.Run("ErrShortHeader", func(t *testing.T) { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader([]byte{}), 4096); err == nil || err.Error() != `short wal header: EOF` { + t.Fatal(err) + } + }) + + t.Run("ErrBadMagic", func(t *testing.T) { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(make([]byte, 32)), 4096); err == nil || err.Error() != `invalid wal header magic: 0` { + t.Fatal(err) + } + }) + + t.Run("ErrShortFrame", func(t *testing.T) { + if _, _, _, _, _, _, _, err := litestream.ReadWALFields(bytes.NewReader(b[:100]), 4096); err == nil || err.Error() != `short wal frame (n=68): unexpected EOF` { + t.Fatal(err) + } + }) +} + // MustOpenDBs returns a new instance of a DB & associated SQL DB. func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) { tb.Helper() @@ -616,7 +515,6 @@ func MustOpenDB(tb testing.TB) *litestream.DB { func MustOpenDBAt(tb testing.TB, path string) *litestream.DB { tb.Helper() db := litestream.NewDB(path) - db.MonitorInterval = 0 // disable background goroutine if err := db.Open(); err != nil { tb.Fatal(err) } diff --git a/file/replica_client.go b/file/replica_client.go deleted file mode 100644 index 178797af..00000000 --- a/file/replica_client.go +++ /dev/null @@ -1,381 +0,0 @@ -package file - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - - "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/internal" -) - -// ReplicaClientType is the client type for this package. -const ReplicaClientType = "file" - -var _ litestream.ReplicaClient = (*ReplicaClient)(nil) - -// ReplicaClient is a client for writing snapshots & WAL segments to disk. -type ReplicaClient struct { - path string // destination path - - Replica *litestream.Replica -} - -// NewReplicaClient returns a new instance of ReplicaClient. 
-func NewReplicaClient(path string) *ReplicaClient { - return &ReplicaClient{ - path: path, - } -} - -// db returns the database, if available. -func (c *ReplicaClient) db() *litestream.DB { - if c.Replica == nil { - return nil - } - return c.Replica.DB() -} - -// Type returns "file" as the client type. -func (c *ReplicaClient) Type() string { - return ReplicaClientType -} - -// Path returns the destination path to replicate the database to. -func (c *ReplicaClient) Path() string { - return c.path -} - -// GenerationsDir returns the path to a generation root directory. -func (c *ReplicaClient) GenerationsDir() (string, error) { - if c.path == "" { - return "", fmt.Errorf("file replica path required") - } - return filepath.Join(c.path, "generations"), nil -} - -// GenerationDir returns the path to a generation's root directory. -func (c *ReplicaClient) GenerationDir(generation string) (string, error) { - dir, err := c.GenerationsDir() - if err != nil { - return "", err - } else if generation == "" { - return "", fmt.Errorf("generation required") - } - return filepath.Join(dir, generation), nil -} - -// SnapshotsDir returns the path to a generation's snapshot directory. -func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) { - dir, err := c.GenerationDir(generation) - if err != nil { - return "", err - } - return filepath.Join(dir, "snapshots"), nil -} - -// SnapshotPath returns the path to an uncompressed snapshot file. 
-func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) { - dir, err := c.SnapshotsDir(generation) - if err != nil { - return "", err - } - return filepath.Join(dir, litestream.FormatSnapshotPath(index)), nil -} - -// WALDir returns the path to a generation's WAL directory -func (c *ReplicaClient) WALDir(generation string) (string, error) { - dir, err := c.GenerationDir(generation) - if err != nil { - return "", err - } - return filepath.Join(dir, "wal"), nil -} - -// WALSegmentPath returns the path to a WAL segment file. -func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { - dir, err := c.WALDir(generation) - if err != nil { - return "", err - } - return filepath.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil -} - -// Generations returns a list of available generation names. -func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { - root, err := c.GenerationsDir() - if err != nil { - return nil, fmt.Errorf("cannot determine generations path: %w", err) - } - - fis, err := ioutil.ReadDir(root) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, err - } - - var generations []string - for _, fi := range fis { - if !litestream.IsGenerationName(fi.Name()) { - continue - } else if !fi.IsDir() { - continue - } - generations = append(generations, fi.Name()) - } - return generations, nil -} - -// DeleteGeneration deletes all snapshots & WAL segments within a generation. -func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { - dir, err := c.GenerationDir(generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } - - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Snapshots returns an iterator over all available snapshots for a generation. 
-func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { - dir, err := c.SnapshotsDir(generation) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) - } - - f, err := os.Open(dir) - if os.IsNotExist(err) { - return litestream.NewSnapshotInfoSliceIterator(nil), nil - } else if err != nil { - return nil, err - } - defer f.Close() - - fis, err := f.Readdir(-1) - if err != nil { - return nil, err - } - - // Iterate over every file and convert to metadata. - infos := make([]litestream.SnapshotInfo, 0, len(fis)) - for _, fi := range fis { - // Parse index from filename. - index, err := litestream.ParseSnapshotPath(fi.Name()) - if err != nil { - continue - } - - infos = append(infos, litestream.SnapshotInfo{ - Generation: generation, - Index: index, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) - } - - sort.Sort(litestream.SnapshotInfoSlice(infos)) - - return litestream.NewSnapshotInfoSliceIterator(infos), nil -} - -// WriteSnapshot writes LZ4 compressed data from rd into a file on disk. -func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { - filename, err := c.SnapshotPath(generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } - - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - - // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { - return info, err - } - - // Write snapshot to temporary file next to destination path. 
- f, err := internal.CreateFile(filename+".tmp", fileInfo) - if err != nil { - return info, err - } - defer f.Close() - - if _, err := io.Copy(f, rd); err != nil { - return info, err - } else if err := f.Sync(); err != nil { - return info, err - } else if err := f.Close(); err != nil { - return info, err - } - - // Build metadata. - fi, err := os.Stat(filename + ".tmp") - if err != nil { - return info, err - } - info = litestream.SnapshotInfo{ - Generation: generation, - Index: index, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - } - - // Move snapshot to final path when it has been fully written & synced to disk. - if err := os.Rename(filename+".tmp", filename); err != nil { - return info, err - } - - return info, nil -} - -// SnapshotReader returns a reader for snapshot data at the given generation/index. -// Returns os.ErrNotExist if no matching index is found. -func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { - filename, err := c.SnapshotPath(generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } - return os.Open(filename) -} - -// DeleteSnapshot deletes a snapshot with the given generation & index. -func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { - filename, err := c.SnapshotPath(generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } - if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// WALSegments returns an iterator over all available WAL files for a generation. 
-func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { - dir, err := c.WALDir(generation) - if err != nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) - } - - f, err := os.Open(dir) - if os.IsNotExist(err) { - return litestream.NewWALSegmentInfoSliceIterator(nil), nil - } else if err != nil { - return nil, err - } - defer f.Close() - - fis, err := f.Readdir(-1) - if err != nil { - return nil, err - } - - // Iterate over every file and convert to metadata. - infos := make([]litestream.WALSegmentInfo, 0, len(fis)) - for _, fi := range fis { - // Parse index from filename. - index, offset, err := litestream.ParseWALSegmentPath(fi.Name()) - if err != nil { - continue - } - - infos = append(infos, litestream.WALSegmentInfo{ - Generation: generation, - Index: index, - Offset: offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) - } - - sort.Sort(litestream.WALSegmentInfoSlice(infos)) - - return litestream.NewWALSegmentInfoSliceIterator(infos), nil -} - -// WriteWALSegment writes LZ4 compressed data from rd into a file on disk. -func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { - filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } - - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - - // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { - return info, err - } - - // Write WAL segment to temporary file next to destination path. 
- f, err := internal.CreateFile(filename+".tmp", fileInfo) - if err != nil { - return info, err - } - defer f.Close() - - if _, err := io.Copy(f, rd); err != nil { - return info, err - } else if err := f.Sync(); err != nil { - return info, err - } else if err := f.Close(); err != nil { - return info, err - } - - // Build metadata. - fi, err := os.Stat(filename + ".tmp") - if err != nil { - return info, err - } - info = litestream.WALSegmentInfo{ - Generation: pos.Generation, - Index: pos.Index, - Offset: pos.Offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - } - - // Move WAL segment to final path when it has been written & synced to disk. - if err := os.Rename(filename+".tmp", filename); err != nil { - return info, err - } - - return info, nil -} - -// WALSegmentReader returns a reader for a section of WAL data at the given position. -// Returns os.ErrNotExist if no matching index/offset is found. -func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { - filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } - return os.Open(filename) -} - -// DeleteWALSegments deletes WAL segments at the given positions. 
-func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error { - for _, pos := range a { - filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) - } - if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return err - } - } - return nil -} diff --git a/file/replica_client_test.go b/file/replica_client_test.go deleted file mode 100644 index 94d2e447..00000000 --- a/file/replica_client_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package file_test - -import ( - "testing" - - "github.com/benbjohnson/litestream/file" -) - -func TestReplicaClient_Path(t *testing.T) { - c := file.NewReplicaClient("/foo/bar") - if got, want := c.Path(), "/foo/bar"; got != want { - t.Fatalf("Path()=%v, want %v", got, want) - } -} - -func TestReplicaClient_Type(t *testing.T) { - if got, want := file.NewReplicaClient("").Type(), "file"; got != want { - t.Fatalf("Type()=%v, want %v", got, want) - } -} - -func TestReplicaClient_GenerationsDir(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationsDir(); err != nil { - t.Fatal(err) - } else if want := "/foo/generations"; got != want { - t.Fatalf("GenerationsDir()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestReplicaClient_GenerationDir(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "/foo/generations/0123456701234567"; got != want { - t.Fatalf("GenerationDir()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").GenerationDir("0123456701234567"); 
err == nil || err.Error() != `file replica path required` { - t.Fatalf("expected error: %v", err) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { - t.Fatalf("expected error: %v", err) - } - }) -} - -func TestReplicaClient_SnapshotsDir(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "/foo/generations/0123456701234567/snapshots"; got != want { - t.Fatalf("SnapshotsDir()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { - t.Fatalf("unexpected error: %v", err) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestReplicaClient_SnapshotPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { - t.Fatal(err) - } else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want { - t.Fatalf("SnapshotPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { - t.Fatalf("unexpected error: %v", err) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestReplicaClient_WALDir(t *testing.T) { - t.Run("OK", func(t 
*testing.T) { - if got, err := file.NewReplicaClient("/foo").WALDir("0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "/foo/generations/0123456701234567/wal"; got != want { - t.Fatalf("WALDir()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { - t.Fatalf("unexpected error: %v", err) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestReplicaClient_WALSegmentPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { - t.Fatal(err) - } else if want := "/foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want { - t.Fatalf("WALPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { - t.Fatalf("unexpected error: %v", err) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) - } - }) -} - -/* -func TestReplica_Sync(t *testing.T) { - // Ensure replica can successfully sync after DB has sync'd. - t.Run("InitialSync", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - // Sync database & then sync replica. 
- if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } else if err := r.Sync(context.Background()); err != nil { - t.Fatal(err) - } - - // Ensure posistions match. - if want, err := db.Pos(); err != nil { - t.Fatal(err) - } else if got, err := r.Pos(context.Background()); err != nil { - t.Fatal(err) - } else if got != want { - t.Fatalf("Pos()=%v, want %v", got, want) - } - }) - - // Ensure replica can successfully sync multiple times. - t.Run("MultiSync", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { - t.Fatal(err) - } - - // Write to the database multiple times and sync after each write. - for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ { - if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil { - t.Fatal(err) - } - - // Sync periodically. - if i%100 == 0 || i == n-1 { - if err := db.Sync(context.Background()); err != nil { - t.Fatal(err) - } else if err := r.Sync(context.Background()); err != nil { - t.Fatal(err) - } - } - } - - // Ensure posistions match. - pos, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if got, want := pos.Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } - - if want, err := r.Pos(context.Background()); err != nil { - t.Fatal(err) - } else if got := pos; got != want { - t.Fatalf("Pos()=%v, want %v", got, want) - } - }) - - // Ensure replica returns an error if there is no generation available from the DB. 
- t.Run("ErrNoGeneration", func(t *testing.T) { - db, sqldb := MustOpenDBs(t) - defer MustCloseDBs(t, db, sqldb) - - r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir())) - r.MonitorEnabled = false - db.Replicas = []*litestream.Replica{r} - - if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` { - t.Fatal(err) - } - }) -} -*/ diff --git a/file_replica_client.go b/file_replica_client.go new file mode 100644 index 00000000..fd97995e --- /dev/null +++ b/file_replica_client.go @@ -0,0 +1,576 @@ +package litestream + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/benbjohnson/litestream/internal" +) + +// FileReplicaClientType is the client type for file replica clients. +const FileReplicaClientType = "file" + +var _ ReplicaClient = (*FileReplicaClient)(nil) + +// FileReplicaClient is a client for writing snapshots & WAL segments to disk. +type FileReplicaClient struct { + path string // destination path + + // File info + FileMode os.FileMode + DirMode os.FileMode + Uid, Gid int +} + +// NewFileReplicaClient returns a new instance of FileReplicaClient. +func NewFileReplicaClient(path string) *FileReplicaClient { + return &FileReplicaClient{ + path: path, + + FileMode: 0600, + DirMode: 0700, + } +} + +// Type returns "file" as the client type. +func (c *FileReplicaClient) Type() string { + return FileReplicaClientType +} + +// Path returns the destination path to replicate the database to. +func (c *FileReplicaClient) Path() string { + return c.path +} + +// GenerationsDir returns the path to a generation root directory. +func (c *FileReplicaClient) GenerationsDir() (string, error) { + if c.path == "" { + return "", fmt.Errorf("file replica path required") + } + return filepath.Join(c.path, "generations"), nil +} + +// GenerationDir returns the path to a generation's root directory. 
+func (c *FileReplicaClient) GenerationDir(generation string) (string, error) { + dir, err := c.GenerationsDir() + if err != nil { + return "", err + } else if generation == "" { + return "", fmt.Errorf("generation required") + } + return filepath.Join(dir, generation), nil +} + +// SnapshotsDir returns the path to a generation's snapshot directory. +func (c *FileReplicaClient) SnapshotsDir(generation string) (string, error) { + dir, err := c.GenerationDir(generation) + if err != nil { + return "", err + } + return filepath.Join(dir, "snapshots"), nil +} + +// SnapshotPath returns the path to an uncompressed snapshot file. +func (c *FileReplicaClient) SnapshotPath(generation string, index int) (string, error) { + dir, err := c.SnapshotsDir(generation) + if err != nil { + return "", err + } + return filepath.Join(dir, FormatIndex(index)+".snapshot.lz4"), nil +} + +// WALDir returns the path to a generation's WAL directory +func (c *FileReplicaClient) WALDir(generation string) (string, error) { + dir, err := c.GenerationDir(generation) + if err != nil { + return "", err + } + return filepath.Join(dir, "wal"), nil +} + +// WALSegmentPath returns the path to a WAL segment file. +func (c *FileReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { + dir, err := c.WALDir(generation) + if err != nil { + return "", err + } + return filepath.Join(dir, FormatIndex(index), fmt.Sprintf("%s.wal.lz4", FormatOffset(offset))), nil +} + +// Generations returns a list of available generation names. 
+func (c *FileReplicaClient) Generations(ctx context.Context) ([]string, error) { + root, err := c.GenerationsDir() + if err != nil { + return nil, fmt.Errorf("cannot determine generations path: %w", err) + } + + fis, err := ioutil.ReadDir(root) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + var generations []string + for _, fi := range fis { + if !IsGenerationName(fi.Name()) { + continue + } else if !fi.IsDir() { + continue + } + generations = append(generations, fi.Name()) + } + return generations, nil +} + +// DeleteGeneration deletes all snapshots & WAL segments within a generation. +func (c *FileReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { + dir, err := c.GenerationDir(generation) + if err != nil { + return fmt.Errorf("cannot determine generation path: %w", err) + } + + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Snapshots returns an iterator over all available snapshots for a generation. +func (c *FileReplicaClient) Snapshots(ctx context.Context, generation string) (SnapshotIterator, error) { + dir, err := c.SnapshotsDir(generation) + if err != nil { + return nil, err + } + + f, err := os.Open(dir) + if os.IsNotExist(err) { + return NewSnapshotInfoSliceIterator(nil), nil + } else if err != nil { + return nil, err + } + defer f.Close() + + fis, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + // Iterate over every file and convert to metadata. + infos := make([]SnapshotInfo, 0, len(fis)) + for _, fi := range fis { + // Parse index from filename. 
+ index, err := internal.ParseSnapshotPath(filepath.Base(fi.Name())) + if err != nil { + continue + } + + infos = append(infos, SnapshotInfo{ + Generation: generation, + Index: index, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + }) + } + + sort.Sort(SnapshotInfoSlice(infos)) + + return NewSnapshotInfoSliceIterator(infos), nil +} + +// WriteSnapshot writes LZ4 compressed data from rd into a file on disk. +func (c *FileReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info SnapshotInfo, err error) { + filename, err := c.SnapshotPath(generation, index) + if err != nil { + return info, err + } + + // Ensure parent directory exists. + if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { + return info, err + } + + // Write snapshot to temporary file next to destination path. + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) + if err != nil { + return info, err + } + defer f.Close() + + if _, err := io.Copy(f, rd); err != nil { + return info, err + } else if err := f.Sync(); err != nil { + return info, err + } else if err := f.Close(); err != nil { + return info, err + } + + // Build metadata. + fi, err := os.Stat(filename + ".tmp") + if err != nil { + return info, err + } + info = SnapshotInfo{ + Generation: generation, + Index: index, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + } + + // Move snapshot to final path when it has been fully written & synced to disk. + if err := os.Rename(filename+".tmp", filename); err != nil { + return info, err + } + + return info, nil +} + +// SnapshotReader returns a reader for snapshot data at the given generation/index. +// Returns os.ErrNotExist if no matching index is found. 
+func (c *FileReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { + filename, err := c.SnapshotPath(generation, index) + if err != nil { + return nil, err + } + return os.Open(filename) +} + +// DeleteSnapshot deletes a snapshot with the given generation & index. +func (c *FileReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { + filename, err := c.SnapshotPath(generation, index) + if err != nil { + return fmt.Errorf("cannot determine snapshot path: %w", err) + } + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// WALSegments returns an iterator over all available WAL files for a generation. +func (c *FileReplicaClient) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) { + dir, err := c.WALDir(generation) + if err != nil { + return nil, err + } + + f, err := os.Open(dir) + if os.IsNotExist(err) { + return NewWALSegmentInfoSliceIterator(nil), nil + } else if err != nil { + return nil, err + } + defer f.Close() + + fis, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + // Iterate over every file and convert to metadata. + indexes := make([]int, 0, len(fis)) + for _, fi := range fis { + index, err := ParseIndex(fi.Name()) + if err != nil || !fi.IsDir() { + continue + } + indexes = append(indexes, index) + } + + sort.Ints(indexes) + + return NewFileWALSegmentIterator(dir, generation, indexes), nil +} + +// WriteWALSegment writes LZ4 compressed data from rd into a file on disk. +func (c *FileReplicaClient) WriteWALSegment(ctx context.Context, pos Pos, rd io.Reader) (info WALSegmentInfo, err error) { + filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) + if err != nil { + return info, err + } + + // Ensure parent directory exists. 
+ if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { + return info, err + } + + // Write WAL segment to temporary file next to destination path. + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) + if err != nil { + return info, err + } + defer f.Close() + + if _, err := io.Copy(f, rd); err != nil { + return info, err + } else if err := f.Sync(); err != nil { + return info, err + } else if err := f.Close(); err != nil { + return info, err + } + + // Build metadata. + fi, err := os.Stat(filename + ".tmp") + if err != nil { + return info, err + } + info = WALSegmentInfo{ + Generation: pos.Generation, + Index: pos.Index, + Offset: pos.Offset, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + } + + // Move WAL segment to final path when it has been written & synced to disk. + if err := os.Rename(filename+".tmp", filename); err != nil { + return info, err + } + + return info, nil +} + +// WALSegmentReader returns a reader for a section of WAL data at the given position. +// Returns os.ErrNotExist if no matching index/offset is found. +func (c *FileReplicaClient) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { + filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) + if err != nil { + return nil, err + } + return os.Open(filename) +} + +// DeleteWALSegments deletes WAL segments at the given positions. 
+func (c *FileReplicaClient) DeleteWALSegments(ctx context.Context, a []Pos) error { + for _, pos := range a { + filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) + if err != nil { + return err + } + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return err + } + } + return nil +} + +type FileWALSegmentIterator struct { + mu sync.Mutex + notifyCh chan struct{} + + dir string + generation string + indexes []int + + buffered bool + infos []WALSegmentInfo + err error +} + +func NewFileWALSegmentIterator(dir, generation string, indexes []int) *FileWALSegmentIterator { + return &FileWALSegmentIterator{ + dir: dir, + generation: generation, + indexes: indexes, + + notifyCh: make(chan struct{}, 1), + } +} + +func (itr *FileWALSegmentIterator) Close() (err error) { + if e := itr.Err(); e != nil && err == nil { + err = e + } + return err +} + +func (itr *FileWALSegmentIterator) NotifyCh() <-chan struct{} { + return itr.notifyCh +} + +// Generation returns the generation this iterator was initialized with. +func (itr *FileWALSegmentIterator) Generation() string { + return itr.generation +} + +// Indexes returns the pending indexes. Only used for testing. +func (itr *FileWALSegmentIterator) Indexes() []int { + itr.mu.Lock() + defer itr.mu.Unlock() + return itr.indexes +} + +func (itr *FileWALSegmentIterator) Next() bool { + itr.mu.Lock() + defer itr.mu.Unlock() + + // Exit if an error has already occurred. + if itr.err != nil { + return false + } + + // Read first info, if buffered. + if itr.buffered { + itr.buffered = false + return true + } + + for { + // Move to the next segment in cache, if available. + if len(itr.infos) > 1 { + itr.infos = itr.infos[1:] + return true + } + itr.infos = itr.infos[:0] // otherwise clear infos + + // If no indexes remain, stop iteration. + if len(itr.indexes) == 0 { + return false + } + + // Read segments into a cache for the current index. 
+ index := itr.indexes[0] + itr.indexes = itr.indexes[1:] + f, err := os.Open(filepath.Join(itr.dir, FormatIndex(index))) + if err != nil { + itr.err = err + return false + } + defer f.Close() + + fis, err := f.Readdir(-1) + if err != nil { + itr.err = err + return false + } else if err := f.Close(); err != nil { + itr.err = err + return false + } + + for _, fi := range fis { + filename := filepath.Base(fi.Name()) + if fi.IsDir() { + continue + } + + offset, err := ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + if err != nil { + continue + } + + itr.infos = append(itr.infos, WALSegmentInfo{ + Generation: itr.generation, + Index: index, + Offset: offset, + Size: fi.Size(), + CreatedAt: fi.ModTime().UTC(), + }) + } + + // Ensure segments are sorted within index. + sort.Sort(WALSegmentInfoSlice(itr.infos)) + + if len(itr.infos) > 0 { + return true + } + } +} + +// SetErr sets the error on the iterator and notifies it of the change. +func (itr *FileWALSegmentIterator) SetErr(err error) { + itr.mu.Lock() + defer itr.mu.Unlock() + if itr.err == nil { + itr.err = err + } + + select { + case itr.notifyCh <- struct{}{}: + default: + } +} + +// Err returns the first error that occurs on the iterator. +func (itr *FileWALSegmentIterator) Err() error { + itr.mu.Lock() + defer itr.mu.Unlock() + return itr.err +} + +func (itr *FileWALSegmentIterator) WALSegment() WALSegmentInfo { + itr.mu.Lock() + defer itr.mu.Unlock() + + if len(itr.infos) == 0 { + return WALSegmentInfo{} + } + return itr.infos[0] +} + +// Append add an additional WAL segment to the end of the iterator. This +// function expects that info will always be later than all previous infos +// that the iterator has or has seen. 
+func (itr *FileWALSegmentIterator) Append(info WALSegmentInfo) error { + itr.mu.Lock() + defer itr.mu.Unlock() + + if itr.err != nil { + return itr.err + } else if itr.generation != info.Generation { + return fmt.Errorf("generation mismatch") + } + + // If the info has an index that is still waiting to be read from disk into + // the cache then simply append it to the end of the indices. + // + // If we have no pending indices, then append to the end of the infos. If + // we don't have either then just append to the infos and avoid validation. + if len(itr.indexes) > 0 { + maxIndex := itr.indexes[len(itr.indexes)-1] + + if info.Index < maxIndex { + return fmt.Errorf("appended index %q below max index %q", FormatIndex(info.Index), FormatIndex(maxIndex)) + } else if info.Index > maxIndex+1 { + return fmt.Errorf("appended index %q skips index %q", FormatIndex(info.Index), FormatIndex(maxIndex+1)) + } else if info.Index == maxIndex+1 { + itr.indexes = append(itr.indexes, info.Index) + } + // NOTE: no-op if segment index matches the current last index + + } else if len(itr.infos) > 0 { + lastInfo := itr.infos[len(itr.infos)-1] + if info.Index < lastInfo.Index { + return fmt.Errorf("appended index %q below current index %q", FormatIndex(info.Index), FormatIndex(lastInfo.Index)) + } else if info.Index > lastInfo.Index+1 { + return fmt.Errorf("appended index %q skips next index %q", FormatIndex(info.Index), FormatIndex(lastInfo.Index+1)) + } else if info.Index == lastInfo.Index+1 { + itr.indexes = append(itr.indexes, info.Index) + } else { + // If the index matches the current infos, verify its offset and append. 
+ if info.Offset < lastInfo.Offset { + return fmt.Errorf("appended offset %s/%s before last offset %s/%s", FormatIndex(info.Index), FormatOffset(info.Offset), FormatIndex(lastInfo.Index), FormatOffset(lastInfo.Offset)) + } else if info.Offset == lastInfo.Offset { + return fmt.Errorf("duplicate offset %s/%s appended", FormatIndex(info.Index), FormatOffset(info.Offset)) + } + itr.infos = append(itr.infos, info) + } + } else { + itr.buffered = true + itr.infos = append(itr.infos, info) + } + + // Signal that a new segment is available. + select { + case itr.notifyCh <- struct{}{}: + default: + } + + return nil +} diff --git a/file_replica_client_test.go b/file_replica_client_test.go new file mode 100644 index 00000000..a821ff24 --- /dev/null +++ b/file_replica_client_test.go @@ -0,0 +1,266 @@ +package litestream_test + +import ( + "reflect" + "testing" + + "github.com/benbjohnson/litestream" +) + +func TestReplicaClient_Path(t *testing.T) { + c := litestream.NewFileReplicaClient("/foo/bar") + if got, want := c.Path(), "/foo/bar"; got != want { + t.Fatalf("Path()=%v, want %v", got, want) + } +} + +func TestReplicaClient_Type(t *testing.T) { + if got, want := litestream.NewFileReplicaClient("").Type(), "file"; got != want { + t.Fatalf("Type()=%v, want %v", got, want) + } +} + +func TestReplicaClient_GenerationsDir(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := litestream.NewFileReplicaClient("/foo").GenerationsDir(); err != nil { + t.Fatal(err) + } else if want := "/foo/generations"; got != want { + t.Fatalf("GenerationsDir()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_GenerationDir(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := 
litestream.NewFileReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { + t.Fatal(err) + } else if want := "/foo/generations/0123456701234567"; got != want { + t.Fatalf("GenerationDir()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + t.Fatalf("expected error: %v", err) + } + }) + t.Run("ErrNoGeneration", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { + t.Fatalf("expected error: %v", err) + } + }) +} + +func TestReplicaClient_SnapshotsDir(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { + t.Fatal(err) + } else if want := "/foo/generations/0123456701234567/snapshots"; got != want { + t.Fatalf("SnapshotsDir()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + t.Fatalf("unexpected error: %v", err) + } + }) + t.Run("ErrNoGeneration", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_SnapshotPath(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { + t.Fatal(err) + } else if want := "/foo/generations/0123456701234567/snapshots/00000000000003e8.snapshot.lz4"; got != want { + t.Fatalf("SnapshotPath()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := 
litestream.NewFileReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { + t.Fatalf("unexpected error: %v", err) + } + }) + t.Run("ErrNoGeneration", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALDir(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := litestream.NewFileReplicaClient("/foo").WALDir("0123456701234567"); err != nil { + t.Fatal(err) + } else if want := "/foo/generations/0123456701234567/wal"; got != want { + t.Fatalf("WALDir()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + t.Fatalf("unexpected error: %v", err) + } + }) + t.Run("ErrNoGeneration", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegmentPath(t *testing.T) { + t.Run("OK", func(t *testing.T) { + if got, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { + t.Fatal(err) + } else if want := "/foo/generations/0123456701234567/wal/00000000000003e8/00000000000003e9.wal.lz4"; got != want { + t.Fatalf("WALPath()=%v, want %v", got, want) + } + }) + t.Run("ErrNoPath", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { + t.Fatalf("unexpected error: %v", err) + } + }) + t.Run("ErrNoGeneration", func(t *testing.T) { + if _, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != 
`generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestFileWALSegmentIterator_Append(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } + + select { + case <-itr.NotifyCh(): + default: + t.Fatal("expected notification") + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), (litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + }) + + t.Run("MultiOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 1}); err != nil { + t.Fatal(err) + } + + select { + case <-itr.NotifyCh(): + default: + t.Fatal("expected notification") + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), (litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment(), (litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 1}); got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } + }) + + t.Run("MultiIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := 
itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 1}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 2, Offset: 0}); err != nil { + t.Fatal(err) + } + + if got, want := itr.Indexes(), []int{1, 2}; !reflect.DeepEqual(got, want) { + t.Fatalf("indexes=%v, want %v", got, want) + } + }) + + t.Run("ErrGenerationMismatch", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0000000000000000", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `generation mismatch` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowMaxIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000000" below max index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrAboveMaxIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := 
itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 3, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000003" skips index "0000000000000002"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowCurrentIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 1, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000000" below current index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSkipsNextIndex", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 0}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 2, Offset: 0}); err == nil || err.Error() != `appended index "0000000000000002" skips next index "0000000000000001"` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrBelowOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 4}); err == nil || err.Error() != `appended offset 0000000000000000/0000000000000004 before last offset 0000000000000000/0000000000000005` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrDuplicateOffset", func(t *testing.T) { + itr := litestream.NewFileWALSegmentIterator(t.TempDir(), "0123456789abcdef", 
nil) + if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err != nil { + t.Fatal(err) + } else if err := itr.Append(litestream.WALSegmentInfo{Generation: "0123456789abcdef", Index: 0, Offset: 5}); err == nil || err.Error() != `duplicate offset 0000000000000000/0000000000000005 appended` { + t.Fatalf("unexpected error: %s", err) + } + }) +} diff --git a/go.mod b/go.mod index 1c5a0e81..f2c4596e 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,52 @@ module github.com/benbjohnson/litestream -go 1.16 +go 1.18 require ( - cloud.google.com/go/storage v1.15.0 - github.com/Azure/azure-storage-blob-go v0.13.0 // indirect - github.com/Azure/go-autorest/autorest v0.9.0 // indirect - github.com/aws/aws-sdk-go v1.27.0 - github.com/davecgh/go-spew v1.1.1 - github.com/mattn/go-shellwords v1.0.11 // indirect - github.com/mattn/go-sqlite3 v1.14.5 - github.com/pierrec/lz4/v4 v4.1.3 - github.com/pkg/sftp v1.13.0 // indirect - github.com/prometheus/client_golang v1.9.0 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750 - google.golang.org/api v0.45.0 + cloud.google.com/go/storage v1.24.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 + github.com/aws/aws-sdk-go v1.44.71 + github.com/fsnotify/fsnotify v1.5.4 + github.com/mattn/go-shellwords v1.0.12 + github.com/mattn/go-sqlite3 v1.14.14 + github.com/pierrec/lz4/v4 v4.1.15 + github.com/pkg/sftp v1.13.5 + github.com/prometheus/client_golang v1.13.0 + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + google.golang.org/api v0.91.0 gopkg.in/yaml.v2 v2.4.0 ) + +require ( + cloud.google.com/go v0.103.0 // indirect + cloud.google.com/go/compute v1.7.0 // indirect + cloud.google.com/go/iam v0.3.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.5.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48 // indirect + golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 // indirect + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 // indirect + google.golang.org/grpc v1.48.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect +) diff --git a/go.sum b/go.sum index bda8ce98..75ee4ec3 100644 --- a/go.sum +++ b/go.sum @@ -17,16 +17,37 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go 
v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458= +cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= 
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -36,115 +57,89 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.15.0 h1:Ljj+ZXVEhCr/1+4ZhvtteN1ND7UUsNTlduGclLh8GO0= -cloud.google.com/go/storage v1.15.0/go.mod h1:mjjQMoxxyGH7Jr8K5qrx6N2O0AHsczI61sMNn03GIZI= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.24.0 h1:a4N0gIkx83uoVFGz8B2eAV3OhN90QoWF5OZWLKl39ig= +cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= -github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 h1:pqrAR74b6EoR4kcxF7L7Wg2B8Jgil9UUZtMvxhEFqWo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 
h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 h1:YvQv9Mz6T8oR5ypQOL6erY0Z5t71ak1uHV4QFokCOZk= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.44.71 h1:e5ZbeFAdDB9i7NcQWdmIiA/NOC4aWec3syOUtUE0dBA= +github.com/aws/aws-sdk-go v1.44.71/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= 
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -153,6 +148,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -166,13 +162,12 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf 
v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -186,10 +181,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -201,63 +203,44 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix 
v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils 
v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -265,126 +248,66 @@ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= -github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ= -github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod 
h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod 
h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= -github.com/pierrec/lz4/v4 v4.1.3 h1:/dvQpkb0o1pVlSgKNQqfkavlnXaIK+hJ0LXsKRUN9D4= -github.com/pierrec/lz4/v4 v4.1.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.0 h1:Riw6pgOKK41foc1I1Uu03CjvbLZDXeGpInycM4shXoI= -github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGIM= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 
h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= 
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp 
v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -392,18 +315,13 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -412,25 +330,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -453,8 +362,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -464,17 +373,12 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -485,8 +389,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -507,8 +409,21 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48 h1:N9Vc/rorQUDes6B9CNdIxAn5jODGj2wzfrei2x4wNj4= +golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -520,8 +435,18 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 h1:rPRtHfUb0UKZeZ6GH4K4Nt4YRbE9V1u+QZX5upZXqJQ= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 
h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw= +golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -532,16 +457,13 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -551,12 +473,8 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -574,44 +492,68 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210412220455-f1c623a9e750 h1:ZBu6861dZq7xBnG1bn5SRU0vA8nx42at4+kP07FMTog= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -621,8 +563,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -630,7 +570,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -654,14 +593,20 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +golang.org/x/xerrors 
v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -683,10 +628,29 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api 
v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA= +google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -699,7 +663,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -718,6 +681,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -733,19 +697,54 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto 
v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2 h1:g2sJMUGCpeHZqTx8p3wsAWRS64nFq20i4dvJWcKGqvY= -google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 h1:7PEE9xCtufpGJzrqweakEEnTh7YFELmnKm/ee+5jmfQ= +google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -755,50 +754,61 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 
v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -809,5 +819,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git 
a/gcs/replica_client.go b/gs/replica_client.go similarity index 78% rename from gcs/replica_client.go rename to gs/replica_client.go index 7b2b2c67..efd7bc54 100644 --- a/gcs/replica_client.go +++ b/gs/replica_client.go @@ -1,4 +1,4 @@ -package gcs +package gs import ( "context" @@ -17,17 +17,17 @@ import ( ) // ReplicaClientType is the client type for this package. -const ReplicaClientType = "gcs" +const ReplicaClientType = "gs" var _ litestream.ReplicaClient = (*ReplicaClient)(nil) // ReplicaClient is a client for writing snapshots & WAL segments to disk. type ReplicaClient struct { mu sync.Mutex - client *storage.Client // gcs client - bkt *storage.BucketHandle // gcs bucket handle + client *storage.Client // gs client + bkt *storage.BucketHandle // gs bucket handle - // GCS bucket information + // GS bucket information Bucket string Path string } @@ -37,12 +37,12 @@ func NewReplicaClient() *ReplicaClient { return &ReplicaClient{} } -// Type returns "gcs" as the client type. +// Type returns "gs" as the client type. func (c *ReplicaClient) Type() string { return ReplicaClientType } -// Init initializes the connection to GCS. No-op if already initialized. +// Init initializes the connection to GS. No-op if already initialized. func (c *ReplicaClient) Init(ctx context.Context) (err error) { c.mu.Lock() defer c.mu.Unlock() @@ -68,7 +68,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { // Construct query to only pull generation directory names. query := &storage.Query{ Delimiter: "/", - Prefix: litestream.GenerationsPath(c.Path) + "/", + Prefix: path.Join(c.Path, "generations") + "/", } // Loop over results and only build list of generation-formatted names. 
@@ -96,16 +96,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } + prefix := path.Join(c.Path, "generations", generation) + "/" // Iterate over every object in generation and delete it. internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() - for it := c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"}); ; { + for it := c.bkt.Objects(ctx, &storage.Query{Prefix: prefix}); ; { attrs, err := it.Next() if err == iterator.Done { break @@ -130,24 +129,22 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.SnapshotsPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) - } - return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil + prefix := path.Join(c.Path, "generations", generation) + "/" + return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil } // WriteSnapshot writes LZ4 compressed data from rd to the object storage. 
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() w := c.bkt.Object(key).NewWriter(ctx) @@ -163,8 +160,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n)) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, @@ -177,12 +172,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") r, err := c.bkt.Object(key).NewReader(ctx) if isNotExists(err) { @@ -201,12 +195,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { 
if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index), ".snapshot.lz4") if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) { return fmt.Errorf("cannot delete snapshot %q: %w", key, err) @@ -220,24 +213,22 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.WALPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) - } - return newWALSegmentIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil + prefix := path.Join(c.Path, "generations", generation, "wal") + "/" + return newWALSegmentIterator(generation, prefix, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. 
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() w := c.bkt.Object(key).NewWriter(ctx) @@ -267,12 +258,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") r, err := c.bkt.Object(key).NewReader(ctx) if isNotExists(err) { @@ -294,11 +284,11 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } for _, pos := range a { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") if err := c.bkt.Object(key).Delete(ctx); err != nil && 
!isNotExists(err) { return fmt.Errorf("cannot delete wal segment %q: %w", key, err) } @@ -344,7 +334,7 @@ func (itr *snapshotIterator) Next() bool { } // Parse index, otherwise skip to the next object. - index, err := litestream.ParseSnapshotPath(path.Base(attrs.Name)) + index, err := internal.ParseSnapshotPath(path.Base(attrs.Name)) if err != nil { continue } @@ -366,15 +356,17 @@ func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo { return itr.inf type walSegmentIterator struct { generation string + prefix string it *storage.ObjectIterator info litestream.WALSegmentInfo err error } -func newWALSegmentIterator(generation string, it *storage.ObjectIterator) *walSegmentIterator { +func newWALSegmentIterator(generation, prefix string, it *storage.ObjectIterator) *walSegmentIterator { return &walSegmentIterator{ generation: generation, + prefix: prefix, it: it, } } @@ -400,7 +392,7 @@ func (itr *walSegmentIterator) Next() bool { } // Parse index & offset, otherwise skip to the next object. - index, offset, err := litestream.ParseWALSegmentPath(path.Base(attrs.Name)) + index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(attrs.Name, itr.prefix)) if err != nil { continue } diff --git a/http/server.go b/http/server.go new file mode 100644 index 00000000..910158a6 --- /dev/null +++ b/http/server.go @@ -0,0 +1,117 @@ +package http + +import ( + "fmt" + "log" + "net" + "net/http" + httppprof "net/http/pprof" + "os" + "strings" + + "github.com/benbjohnson/litestream" + "github.com/prometheus/client_golang/prometheus/promhttp" + "golang.org/x/sync/errgroup" +) + +// Server represents an HTTP API server for Litestream. 
+type Server struct { + ln net.Listener + closed bool + + httpServer *http.Server + promHandler http.Handler + + addr string + server *litestream.Server + + g errgroup.Group + + Logger *log.Logger +} + +func NewServer(server *litestream.Server, addr string) *Server { + s := &Server{ + addr: addr, + server: server, + Logger: log.New(os.Stderr, "http: ", litestream.LogFlags), + } + + s.promHandler = promhttp.Handler() + s.httpServer = &http.Server{ + Handler: http.HandlerFunc(s.serveHTTP), + } + return s +} + +func (s *Server) Open() (err error) { + if s.ln, err = net.Listen("tcp", s.addr); err != nil { + return err + } + + s.g.Go(func() error { + if err := s.httpServer.Serve(s.ln); err != nil && !s.closed { + return err + } + return nil + }) + + return nil +} + +func (s *Server) Close() (err error) { + s.closed = true + + if s.ln != nil { + if e := s.ln.Close(); e != nil && err == nil { + err = e + } + } + + if e := s.g.Wait(); e != nil && err == nil { + err = e + } + return err +} + +// Port returns the port the listener is running on. +func (s *Server) Port() int { + if s.ln == nil { + return 0 + } + return s.ln.Addr().(*net.TCPAddr).Port +} + +// URL returns the full base URL for the running server. 
+func (s *Server) URL() string { + host, _, _ := net.SplitHostPort(s.addr) + if host == "" { + host = "localhost" + } + return fmt.Sprintf("http://%s", net.JoinHostPort(host, fmt.Sprint(s.Port()))) +} + +func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/debug/pprof") { + switch r.URL.Path { + case "/debug/pprof/cmdline": + httppprof.Cmdline(w, r) + case "/debug/pprof/profile": + httppprof.Profile(w, r) + case "/debug/pprof/symbol": + httppprof.Symbol(w, r) + case "/debug/pprof/trace": + httppprof.Trace(w, r) + default: + httppprof.Index(w, r) + } + return + } + + switch r.URL.Path { + case "/metrics": + s.promHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } +} diff --git a/integration/cmd_test.go b/integration/cmd_test.go new file mode 100644 index 00000000..3abc628a --- /dev/null +++ b/integration/cmd_test.go @@ -0,0 +1,465 @@ +package integration_test + +import ( + "bytes" + "context" + "database/sql" + "flag" + "fmt" + "io" + "math/rand" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/benbjohnson/litestream/internal/testingutil" + _ "github.com/mattn/go-sqlite3" +) + +var longRunningDuration = flag.Duration("long-running-duration", 0, "") + +func init() { + fmt.Fprintln(os.Stderr, "# ") + fmt.Fprintln(os.Stderr, "# NOTE: Build litestream to your PATH before running integration tests") + fmt.Fprintln(os.Stderr, "#") + fmt.Fprintln(os.Stderr, "") +} + +// Ensure the default configuration works with light database load. 
+func TestCmd_Replicate_OK(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "ok"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes periodically. + for i := 0; i < 100; i++ { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + time.Sleep(10 * time.Millisecond) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd, stdout) + + // Ensure signal and shutdown are logged. + if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) { + t.Fatal("missing log output for signal received") + } else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) { + t.Fatal("missing log output for shut down") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure that stopping and restarting Litestream before an application-induced +// checkpoint will cause Litestream to continue replicating using the same generation. 
+func TestCmd_Replicate_ResumeWithCurrentGeneration(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-current-generation"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + t.Log("writing to database during replication") + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute a few writes to populate the WAL. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil { + t.Fatal(err) + } + + // Wait for replication to occur & shutdown. + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + t.Log("replication shutdown, continuing database writes") + + // Execute a few more writes while replication is stopped. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil { + t.Fatal(err) + } + + t.Log("restarting replication") + + cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + + t.Log("replication shutdown again") + + // Litestream should resume replication from the previous generation. 
+ if s := stdout.String(); strings.Contains(s, "no generation exists") { + t.Fatal("expected existing generation to resume; started new generation instead") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure that restarting Litestream after a full checkpoint has occurred will +// cause it to begin a new generation. +func TestCmd_Replicate_ResumeWithNewGeneration(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "resume-with-new-generation"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + t.Log("writing to database during replication") + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute a few writes to populate the WAL. + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (1)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (2)`); err != nil { + t.Fatal(err) + } + + // Wait for replication to occur & shutdown. + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + t.Log("replication shutdown, continuing database writes") + + // Execute a few more writes while replication is stopped. 
+ if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (3)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (4)`); err != nil { + t.Fatal(err) + } + + t.Log("issuing checkpoint") + + // Issue a checkpoint to restart WAL. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(RESTART)`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (5)`); err != nil { + t.Fatal(err) + } + + t.Log("restarting replication") + + cmd, stdout, _ = commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + waitForLogMessage(t, stdout, `wal segment written`) + killLitestreamCmd(t, cmd, stdout) + + t.Log("replication shutdown again") + + // Litestream should resume replication from the previous generation. + if s := stdout.String(); !strings.Contains(s, "no generation exists") { + t.Fatal("expected new generation to start; continued existing generation instead") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure the monitor interval can be turned off. 
+func TestCmd_Replicate_NoMonitorDelayInterval(t *testing.T) { + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "no-monitor-delay-interval"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = wal`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + time.Sleep(1 * time.Second) + + // Execute writes periodically. + for i := 0; i < 10; i++ { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + } + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd, stdout) + + // Ensure signal and shutdown are logged. + if s := stdout.String(); !strings.Contains(s, `signal received, litestream shutting down`) { + t.Fatal("missing log output for signal received") + } else if s := stdout.String(); !strings.Contains(s, `litestream shut down`) { + t.Fatal("missing log output for shut down") + } + + // Checkpoint & verify original SQLite database. + if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure the default configuration works with heavy write load. 
+func TestCmd_Replicate_HighLoad(t *testing.T) { + if testing.Short() { + t.Skip("short mode enabled, skipping") + } else if os.Getenv("CI") != "" { + t.Skip("ci, skipping") + } + + const writeDuration = 30 * time.Second + + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "high-load"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA wal_autocheckpoint = 0`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes as fast as possible for a period of time. + timer := time.NewTimer(writeDuration) + defer timer.Stop() + + t.Logf("executing writes for %s", writeDuration) + +LOOP: + for i := 0; ; i++ { + select { + case <-timer.C: + break LOOP + default: + if i%1000 == 0 { + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + } + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + } + } + + t.Logf("writes complete, shutting down") + + // Stop & wait for Litestream command. + time.Sleep(5 * time.Second) + killLitestreamCmd(t, cmd, stdout) + + // Checkpoint & verify original SQLite database. 
+ if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// Ensure replication works for an extended period. +func TestCmd_Replicate_LongRunning(t *testing.T) { + if *longRunningDuration == 0 { + t.Skip("long running test duration not specified, skipping") + } + + ctx := context.Background() + testDir, tempDir := filepath.Join("testdata", "replicate", "long-running"), t.TempDir() + env := []string{"LITESTREAM_TEMPDIR=" + tempDir} + + cmd, stdout, _ := commandContext(ctx, env, "replicate", "-config", filepath.Join(testDir, "litestream.yml")) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db")) + if err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil { + t.Fatal(err) + } else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil { + t.Fatal(err) + } + defer db.Close() + + // Execute writes as fast as possible for a period of time. + timer := time.NewTimer(*longRunningDuration) + defer timer.Stop() + + t.Logf("executing writes for %s", longRunningDuration) + +LOOP: + for i := 0; ; i++ { + select { + case <-timer.C: + break LOOP + default: + t.Logf("[exec] INSERT INTO t (id) VALUES (%d)", i) + if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?)`, i); err != nil { + t.Fatal(err) + } + + time.Sleep(time.Duration(rand.Intn(int(time.Second)))) + } + } + + t.Logf("writes complete, shutting down") + + // Stop & wait for Litestream command. + killLitestreamCmd(t, cmd, stdout) + + // Checkpoint & verify original SQLite database. 
+ if _, err := db.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`); err != nil { + t.Fatal(err) + } + restoreAndVerify(t, ctx, env, filepath.Join(testDir, "litestream.yml"), filepath.Join(tempDir, "db")) +} + +// commandContext returns a "litestream" command with stdout/stderr buffers. +func commandContext(ctx context.Context, env []string, arg ...string) (cmd *exec.Cmd, stdout, stderr *internal.LockingBuffer) { + cmd = exec.CommandContext(ctx, "litestream", arg...) + cmd.Env = env + var outBuf, errBuf internal.LockingBuffer + + // Split stdout/stderr to terminal if verbose flag set. + cmd.Stdout, cmd.Stderr = &outBuf, &errBuf + if testing.Verbose() { + cmd.Stdout = io.MultiWriter(&outBuf, os.Stdout) + cmd.Stderr = io.MultiWriter(&errBuf, os.Stderr) + } + + return cmd, &outBuf, &errBuf +} + +// waitForLogMessage continuously checks b for a message and returns when it occurs. +func waitForLogMessage(tb testing.TB, b *internal.LockingBuffer, msg string) { + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-timer.C: + tb.Fatal("timed out waiting for cmd initialization") + case <-ticker.C: + if strings.Contains(b.String(), msg) { + return + } + } + } +} + +// killLitestreamCmd interrupts the process and waits for a clean shutdown. +func killLitestreamCmd(tb testing.TB, cmd *exec.Cmd, stdout *internal.LockingBuffer) { + tb.Helper() + if err := cmd.Process.Signal(os.Interrupt); err != nil { + tb.Fatal("kill litestream: signal:", err) + } else if err := cmd.Wait(); err != nil { + tb.Fatal("kill litestream: cmd:", err) + } +} + +// restoreAndVerify executes a "restore" and compares byte with the original database. +func restoreAndVerify(tb testing.TB, ctx context.Context, env []string, configPath, dbPath string) { + restorePath := filepath.Join(tb.TempDir(), "db") + + // Restore database. 
+ cmd, _, _ := commandContext(ctx, env, "restore", "-config", configPath, "-o", restorePath, dbPath) + if err := cmd.Run(); err != nil { + tb.Fatalf("error running 'restore' command: %s", err) + } + + // Compare original database & restored database. + buf0 := testingutil.ReadFile(tb, dbPath) + buf1 := testingutil.ReadFile(tb, restorePath) + if bytes.Equal(buf0, buf1) { + return // ok, exit + } + + // On mismatch, copy out original & restored DBs. + dir, err := os.MkdirTemp("", "litestream-*") + if err != nil { + tb.Fatal(err) + } + testingutil.CopyFile(tb, dbPath, filepath.Join(dir, "original.db")) + testingutil.CopyFile(tb, restorePath, filepath.Join(dir, "restored.db")) + + tb.Fatalf("database mismatch; databases copied to %s", dir) +} diff --git a/integration/replica_client_test.go b/integration/replica_client_test.go new file mode 100644 index 00000000..b76034b8 --- /dev/null +++ b/integration/replica_client_test.go @@ -0,0 +1,568 @@ +package integration_test + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/abs" + "github.com/benbjohnson/litestream/gs" + "github.com/benbjohnson/litestream/s3" + "github.com/benbjohnson/litestream/sftp" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var ( + // Enables integration tests. 
+ replicaType = flag.String("replica-type", "file", "") +) + +// S3 settings +var ( + // Replica client settings + s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") + s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") + s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") + s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") + s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") + s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") + s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") + s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") +) + +// Google cloud storage settings +var ( + gsBucket = flag.String("gs-bucket", os.Getenv("LITESTREAM_GS_BUCKET"), "") + gsPath = flag.String("gs-path", os.Getenv("LITESTREAM_GS_PATH"), "") +) + +// Azure blob storage settings +var ( + absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") + absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") + absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") + absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") +) + +// SFTP settings +var ( + sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") + sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") + sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") + sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") + sftpHostKeyPath = flag.String("sftp-host-key-path", os.Getenv("LITESTREAM_SFTP_HOST_KEY_PATH"), "") + sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") +) + +func 
TestReplicaClient_Generations(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { + t.Fatal(err) + } + + // Verify returned generations. + if got, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { + t.Fatalf("Generations()=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if generations, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := len(generations), 0; got != want { + t.Fatalf("len(Generations())=%v, want %v", got, want) + } + }) +} + +func TestReplicaClient_Snapshots(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + // Fetch all snapshots by generation. 
+ itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all snapshots into a slice so they can be sorted. + a, err := litestream.SliceSnapshotIterator(itr) + if err != nil { + t.Fatal(err) + } else if got, want := len(a), 2; got != want { + t.Fatalf("len=%v, want %v", got, want) + } + sort.Sort(litestream.SnapshotInfoSlice(a)) + + // Verify first snapshot metadata. + if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[0].Index, 5; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[0].Size, int64(1); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[0].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify second snapshot metadata. + if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[1].Index, 0xA; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[1].Size, int64(3); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Ensure close is clean. 
+ if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no snapshots") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteSnapshot(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_SnapshotReader(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) + if err != nil { + 
t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foo"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { + t.Fatal(err) + } + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all WAL segment files into a slice so they can be sorted. 
+	a, err := litestream.SliceWALSegmentIterator(itr)
+	if err != nil {
+		t.Fatal(err)
+	} else if got, want := len(a), 3; got != want {
+		t.Fatalf("len=%v, want %v", got, want)
+	}
+	sort.Sort(litestream.WALSegmentInfoSlice(a))
+
+	// Verify first WAL segment metadata.
+	if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
+		t.Fatalf("Generation=%v, want %v", got, want)
+	} else if got, want := a[0].Index, 2; got != want {
+		t.Fatalf("Index=%v, want %v", got, want)
+	} else if got, want := a[0].Offset, int64(0); got != want {
+		t.Fatalf("Offset=%v, want %v", got, want)
+	} else if got, want := a[0].Size, int64(5); got != want {
+		t.Fatalf("Size=%v, want %v", got, want)
+	} else if a[0].CreatedAt.IsZero() {
+		t.Fatalf("expected CreatedAt")
+	}
+
+	// Verify second WAL segment metadata.
+	if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
+		t.Fatalf("Generation=%v, want %v", got, want)
+	} else if got, want := a[1].Index, 2; got != want {
+		t.Fatalf("Index=%v, want %v", got, want)
+	} else if got, want := a[1].Offset, int64(5); got != want {
+		t.Fatalf("Offset=%v, want %v", got, want)
+	} else if got, want := a[1].Size, int64(2); got != want {
+		t.Fatalf("Size=%v, want %v", got, want)
+	} else if a[1].CreatedAt.IsZero() {
+		t.Fatalf("expected CreatedAt")
+	}
+
+	// Verify third WAL segment metadata.
+	if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want {
+		t.Fatalf("Generation=%v, want %v", got, want)
+	} else if got, want := a[2].Index, 3; got != want {
+		t.Fatalf("Index=%v, want %v", got, want)
+	} else if got, want := a[2].Offset, int64(0); got != want {
+		t.Fatalf("Offset=%v, want %v", got, want)
+	} else if got, want := a[2].Size, int64(3); got != want {
+		t.Fatalf("Size=%v, want %v", got, want)
+	} else if a[2].CreatedAt.IsZero() {
+		t.Fatalf("expected CreatedAt")
+	}
+
+	// Ensure close is clean.
+ if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteWALSegment(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := 
c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegmentReader(t *testing.T) { + + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foobar"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) +} + +func TestReplicaClient_DeleteWALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } + + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ + {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, + {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, + }); err != nil { + t.Fatal(err) + } + + if _, err := 
c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +// RunWithReplicaClient executes fn with each replica specified by the -replica-type flag +func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { + t.Run(name, func(t *testing.T) { + for _, typ := range strings.Split(*replicaType, ",") { + t.Run(typ, func(t *testing.T) { + c := NewReplicaClient(t, typ) + defer MustDeleteAll(t, c) + + fn(t, c) + }) + } + }) +} + +// NewReplicaClient returns a new client for integration testing by type name. +func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { + tb.Helper() + + switch typ { + case litestream.FileReplicaClientType: + return litestream.NewFileReplicaClient(tb.TempDir()) + case s3.ReplicaClientType: + return NewS3ReplicaClient(tb) + case gs.ReplicaClientType: + return NewGSReplicaClient(tb) + case abs.ReplicaClientType: + return NewABSReplicaClient(tb) + case sftp.ReplicaClientType: + return NewSFTPReplicaClient(tb) + default: + tb.Fatalf("invalid replica client type: %q", typ) + return nil + } +} + +// NewS3ReplicaClient returns a new client for integration testing. 
+func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { + tb.Helper() + + c := s3.NewReplicaClient() + c.AccessKeyID = *s3AccessKeyID + c.SecretAccessKey = *s3SecretAccessKey + c.Region = *s3Region + c.Bucket = *s3Bucket + c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) + c.Endpoint = *s3Endpoint + c.ForcePathStyle = *s3ForcePathStyle + c.SkipVerify = *s3SkipVerify + return c +} + +// NewGSReplicaClient returns a new client for integration testing. +func NewGSReplicaClient(tb testing.TB) *gs.ReplicaClient { + tb.Helper() + + c := gs.NewReplicaClient() + c.Bucket = *gsBucket + c.Path = path.Join(*gsPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewABSReplicaClient returns a new client for integration testing. +func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { + tb.Helper() + + c := abs.NewReplicaClient() + c.AccountName = *absAccountName + c.AccountKey = *absAccountKey + c.Bucket = *absBucket + c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewSFTPReplicaClient returns a new client for integration testing. +func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { + tb.Helper() + + c := sftp.NewReplicaClient() + c.Host = *sftpHost + c.User = *sftpUser + c.Password = *sftpPassword + c.KeyPath = *sftpKeyPath + c.HostKeyPath = *sftpHostKeyPath + c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// MustDeleteAll deletes all objects under the client's path. 
+func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { + tb.Helper() + + generations, err := c.Generations(context.Background()) + if err != nil { + tb.Fatalf("cannot list generations for deletion: %s", err) + } + + for _, generation := range generations { + if err := c.DeleteGeneration(context.Background(), generation); err != nil { + tb.Fatalf("cannot delete generation: %s", err) + } + } + + switch c := c.(type) { + case *sftp.ReplicaClient: + if err := c.Cleanup(context.Background()); err != nil { + tb.Fatalf("cannot cleanup sftp: %s", err) + } + } +} diff --git a/integration/testdata/replicate/high-load/litestream.yml b/integration/testdata/replicate/high-load/litestream.yml new file mode 100644 index 00000000..5e116355 --- /dev/null +++ b/integration/testdata/replicate/high-load/litestream.yml @@ -0,0 +1,6 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/http-full-recovery/litestream.0.yml b/integration/testdata/replicate/http-full-recovery/litestream.0.yml new file mode 100644 index 00000000..88dea072 --- /dev/null +++ b/integration/testdata/replicate/http-full-recovery/litestream.0.yml @@ -0,0 +1,6 @@ +addr: :10002 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 5 + shadow-retention-count: 3 diff --git a/integration/testdata/replicate/http-full-recovery/litestream.1.yml b/integration/testdata/replicate/http-full-recovery/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http-full-recovery/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/integration/testdata/replicate/http-partial-recovery/litestream.0.yml b/integration/testdata/replicate/http-partial-recovery/litestream.0.yml new file mode 100644 index 00000000..41c7b1b3 --- 
/dev/null +++ b/integration/testdata/replicate/http-partial-recovery/litestream.0.yml @@ -0,0 +1,5 @@ +addr: :10002 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 10 diff --git a/integration/testdata/replicate/http-partial-recovery/litestream.1.yml b/integration/testdata/replicate/http-partial-recovery/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http-partial-recovery/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/integration/testdata/replicate/http/litestream.0.yml b/integration/testdata/replicate/http/litestream.0.yml new file mode 100644 index 00000000..e30e651a --- /dev/null +++ b/integration/testdata/replicate/http/litestream.0.yml @@ -0,0 +1,5 @@ +addr: :10001 + +dbs: + - path: $LITESTREAM_TEMPDIR/0/db + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/http/litestream.1.yml b/integration/testdata/replicate/http/litestream.1.yml new file mode 100644 index 00000000..e9735700 --- /dev/null +++ b/integration/testdata/replicate/http/litestream.1.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/1/db + upstream: + url: "$LITESTREAM_UPSTREAM_URL" + path: "$LITESTREAM_TEMPDIR/0/db" diff --git a/integration/testdata/replicate/long-running/litestream.yml b/integration/testdata/replicate/long-running/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/long-running/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml b/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml new file mode 100644 index 00000000..e597b313 --- /dev/null +++ 
b/integration/testdata/replicate/no-monitor-delay-interval/litestream.yml @@ -0,0 +1,5 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + monitor-delay-interval: 0 + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/ok/litestream.yml b/integration/testdata/replicate/ok/litestream.yml new file mode 100644 index 00000000..5e116355 --- /dev/null +++ b/integration/testdata/replicate/ok/litestream.yml @@ -0,0 +1,6 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + max-checkpoint-page-count: 20 diff --git a/integration/testdata/replicate/resume-with-current-generation/litestream.yml b/integration/testdata/replicate/resume-with-current-generation/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/resume-with-current-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/resume-with-new-generation/litestream.yml b/integration/testdata/replicate/resume-with-new-generation/litestream.yml new file mode 100644 index 00000000..b7d0e0ee --- /dev/null +++ b/integration/testdata/replicate/resume-with-new-generation/litestream.yml @@ -0,0 +1,4 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica diff --git a/integration/testdata/replicate/resume/litestream.yml b/integration/testdata/replicate/resume/litestream.yml new file mode 100644 index 00000000..494ece05 --- /dev/null +++ b/integration/testdata/replicate/resume/litestream.yml @@ -0,0 +1,6 @@ +dbs: + - path: $LITESTREAM_TEMPDIR/db + replicas: + - path: $LITESTREAM_TEMPDIR/replica + + max-checkpoint-page-count: 10 diff --git a/internal/internal.go b/internal/internal.go index b22399ca..e4da7f8e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -1,14 +1,27 @@ package internal import ( + "crypto/md5" + "fmt" "io" + "math" 
"os" + "regexp" + "strconv" + "sync" "syscall" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) +// Platform-independent maximum integer sizes. +const ( + MaxUint = ^uint(0) + MaxInt = int(MaxUint >> 1) +) + // ReadCloser wraps a reader to also attach a separate closer. type ReadCloser struct { r io.Reader @@ -36,6 +49,39 @@ func (r *ReadCloser) Close() error { return r.c.Close() } +// MultiReadCloser is a logical concatenation of io.ReadCloser. +// It works like io.MultiReader except all objects are closed when Close() is called. +type MultiReadCloser struct { + mr io.Reader + closers []io.Closer +} + +// NewMultiReadCloser returns a new instance of MultiReadCloser. +func NewMultiReadCloser(a []io.ReadCloser) *MultiReadCloser { + readers := make([]io.Reader, len(a)) + closers := make([]io.Closer, len(a)) + for i, rc := range a { + readers[i] = rc + closers[i] = rc + } + return &MultiReadCloser{mr: io.MultiReader(readers...), closers: closers} +} + +// Read reads from the next available reader. +func (mrc *MultiReadCloser) Read(p []byte) (n int, err error) { + return mrc.mr.Read(p) +} + +// Close closes all underlying ReadClosers and returns first error encountered. +func (mrc *MultiReadCloser) Close() (err error) { + for _, c := range mrc.closers { + if e := c.Close(); e != nil && err == nil { + err = e + } + } + return err +} + // ReadCounter wraps an io.Reader and counts the total number of bytes read. type ReadCounter struct { r io.Reader @@ -58,27 +104,34 @@ func (r *ReadCounter) Read(p []byte) (int, error) { func (r *ReadCounter) N() int64 { return r.n } // CreateFile creates the file and matches the mode & uid/gid of fi. 
-func CreateFile(filename string, fi os.FileInfo) (*os.File, error) { - mode := os.FileMode(0600) - if fi != nil { - mode = fi.Mode() - } - +func CreateFile(filename string, mode os.FileMode, uid, gid int) (*os.File, error) { f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return nil, err } - uid, gid := Fileinfo(fi) _ = f.Chown(uid, gid) return f, nil } +// WriteFile writes data to a named file and sets the mode & uid/gid. +func WriteFile(name string, data []byte, perm os.FileMode, uid, gid int) error { + f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + _ = f.Chown(uid, gid) + + _, err = f.Write(data) + if err1 := f.Close(); err1 != nil && err == nil { + err = err1 + } + return err +} + // MkdirAll is a copy of os.MkdirAll() except that it attempts to set the // mode/uid/gid to match fi for each created directory. -func MkdirAll(path string, fi os.FileInfo) error { - uid, gid := Fileinfo(fi) - +func MkdirAll(path string, mode os.FileMode, uid, gid int) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { @@ -101,17 +154,13 @@ func MkdirAll(path string, fi os.FileInfo) error { if j > 1 { // Create parent. - err = MkdirAll(fixRootDirectory(path[:j-1]), fi) + err = MkdirAll(path[:j-1], mode, uid, gid) if err != nil { return err } } // Parent now exists; invoke Mkdir and use its result. - mode := os.FileMode(0700) - if fi != nil { - mode = fi.Mode() - } err = os.Mkdir(path, mode) if err != nil { // Handle arguments like "foo/." by @@ -127,6 +176,42 @@ func MkdirAll(path string, fi os.FileInfo) error { return nil } +// ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas. 
+func ParseSnapshotPath(s string) (index int, err error) { + a := snapshotPathRegex.FindStringSubmatch(s) + if a == nil { + return 0, fmt.Errorf("invalid snapshot path") + } + + i64, _ := strconv.ParseUint(a[1], 16, 64) + if i64 > uint64(MaxInt) { + return 0, fmt.Errorf("index too large in snapshot path %q", s) + } + return int(i64), nil +} + +var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{16})\.snapshot\.lz4$`) + +// ParseWALSegmentPath parses the index/offset from a segment filename. Used by path-based replicas. +func ParseWALSegmentPath(s string) (index int, offset int64, err error) { + a := walSegmentPathRegex.FindStringSubmatch(s) + if a == nil { + return 0, 0, fmt.Errorf("invalid wal segment path") + } + + i64, _ := strconv.ParseUint(a[1], 16, 64) + if i64 > uint64(MaxInt) { + return 0, 0, fmt.Errorf("index too large in wal segment path %q", s) + } + off64, _ := strconv.ParseUint(a[2], 16, 64) + if off64 > math.MaxInt64 { + return 0, 0, fmt.Errorf("offset too large in wal segment path %q", s) + } + return int(i64), int64(off64), nil +} + +var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{16})\/([0-9a-f]{16})\.wal\.lz4$`) + // Shared replica metrics. var ( OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ @@ -139,3 +224,50 @@ var ( Help: "The number of bytes used by replica operations", }, []string{"replica_type", "operation"}) ) + +// TruncateDuration truncates d to the nearest major unit (s, ms, µs, ns). 
+func TruncateDuration(d time.Duration) time.Duration { + if d < 0 { + if d < -10*time.Second { + return d.Truncate(time.Second) + } else if d < -time.Second { + return d.Truncate(time.Second / 10) + } else if d < -time.Millisecond { + return d.Truncate(time.Millisecond) + } else if d < -time.Microsecond { + return d.Truncate(time.Microsecond) + } + return d + } + + if d > 10*time.Second { + return d.Truncate(time.Second) + } else if d > time.Second { + return d.Truncate(time.Second / 10) + } else if d > time.Millisecond { + return d.Truncate(time.Millisecond) + } else if d > time.Microsecond { + return d.Truncate(time.Microsecond) + } + return d +} + +// MD5Hash returns a hex-encoded MD5 hash of b. +func MD5Hash(b []byte) string { + return fmt.Sprintf("%x", md5.Sum(b)) +} + +// OnceCloser returns a closer that will only ignore duplicate closes. +func OnceCloser(c io.Closer) io.Closer { + return &onceCloser{Closer: c} +} + +type onceCloser struct { + sync.Once + io.Closer +} + +func (c *onceCloser) Close() (err error) { + c.Once.Do(func() { err = c.Closer.Close() }) + return err +} diff --git a/internal/internal_test.go b/internal/internal_test.go new file mode 100644 index 00000000..308c6df4 --- /dev/null +++ b/internal/internal_test.go @@ -0,0 +1,145 @@ +package internal_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/benbjohnson/litestream/mock" +) + +func TestParseSnapshotPath(t *testing.T) { + for _, tt := range []struct { + s string + index int + err error + }{ + {"0000000000bc614e.snapshot.lz4", 12345678, nil}, + {"xxxxxxxxxxxxxxxx.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")}, + {"0000000000bc614.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")}, + {"0000000000bc614e.snapshot.lz", 0, fmt.Errorf("invalid snapshot path")}, + {"0000000000bc614e.snapshot", 0, fmt.Errorf("invalid snapshot path")}, + {"0000000000bc614e", 0, fmt.Errorf("invalid snapshot path")}, + {"", 0, 
fmt.Errorf("invalid snapshot path")}, + } { + t.Run("", func(t *testing.T) { + index, err := internal.ParseSnapshotPath(tt.s) + if got, want := index, tt.index; got != want { + t.Errorf("index=%#v, want %#v", got, want) + } else if got, want := err, tt.err; !reflect.DeepEqual(got, want) { + t.Errorf("err=%#v, want %#v", got, want) + } + }) + } +} + +func TestParseWALSegmentPath(t *testing.T) { + for _, tt := range []struct { + s string + index int + offset int64 + err error + }{ + {"0000000000bc614e/00000000000003e8.wal.lz4", 12345678, 1000, nil}, + {"0000000000000000/0000000000000000.wal", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"0000000000000000/0000000000000000", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"0000000000000000/", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"0000000000000000", 0, 0, fmt.Errorf("invalid wal segment path")}, + {"", 0, 0, fmt.Errorf("invalid wal segment path")}, + } { + t.Run("", func(t *testing.T) { + index, offset, err := internal.ParseWALSegmentPath(tt.s) + if got, want := index, tt.index; got != want { + t.Errorf("index=%#v, want %#v", got, want) + } + if got, want := offset, tt.offset; got != want { + t.Errorf("offset=%#v, want %#v", got, want) + } + if got, want := err, tt.err; !reflect.DeepEqual(got, want) { + t.Errorf("err=%#v, want %#v", got, want) + } + }) + } +} + +func TestTruncateDuration(t *testing.T) { + for _, tt := range []struct { + input, output time.Duration + }{ + {0, 0 * time.Nanosecond}, + + {1, 1 * time.Nanosecond}, + {12, 12 * time.Nanosecond}, + {123, 123 * time.Nanosecond}, + {1234, 1 * time.Microsecond}, + {12345, 12 * time.Microsecond}, + {123456, 123 * time.Microsecond}, + {1234567, 1 * time.Millisecond}, + {12345678, 12 * time.Millisecond}, + {123456789, 123 * time.Millisecond}, + {1234567890, 1200 * time.Millisecond}, + {12345678900, 12 * time.Second}, + + {-1, -1 * time.Nanosecond}, + {-12, -12 * time.Nanosecond}, + {-123, -123 * time.Nanosecond}, + {-1234, -1 * time.Microsecond}, 
+ {-12345, -12 * time.Microsecond}, + {-123456, -123 * time.Microsecond}, + {-1234567, -1 * time.Millisecond}, + {-12345678, -12 * time.Millisecond}, + {-123456789, -123 * time.Millisecond}, + {-1234567890, -1200 * time.Millisecond}, + {-12345678900, -12 * time.Second}, + } { + t.Run(fmt.Sprint(int(tt.input)), func(t *testing.T) { + if got, want := internal.TruncateDuration(tt.input), tt.output; got != want { + t.Fatalf("duration=%s, want %s", got, want) + } + }) + } +} + +func TestMD5Hash(t *testing.T) { + for _, tt := range []struct { + input []byte + output string + }{ + {[]byte{}, "d41d8cd98f00b204e9800998ecf8427e"}, + {[]byte{0x0}, "93b885adfe0da089cdf634904fd59f71"}, + {[]byte{0x0, 0x1, 0x2, 0x3}, "37b59afd592725f9305e484a5d7f5168"}, + {[]byte("Hello, world!"), "6cd3556deb0da54bca060b4c39479839"}, + } { + t.Run(fmt.Sprintf("%v", tt.input), func(t *testing.T) { + if got, want := internal.MD5Hash(tt.input), tt.output; got != want { + t.Fatalf("hash=%s, want %s", got, want) + } + }) + } +} + +func TestOnceCloser(t *testing.T) { + var closed bool + var rc = &mock.ReadCloser{ + CloseFunc: func() error { + if closed { + t.Fatal("already closed") + } + closed = true + return nil + }, + } + + oc := internal.OnceCloser(rc) + if err := oc.Close(); err != nil { + t.Fatalf("first close: %s", err) + } else if err := oc.Close(); err != nil { + t.Fatalf("second close: %s", err) + } + + if !closed { + t.Fatal("expected close") + } +} diff --git a/internal/internal_unix.go b/internal/internal_unix.go index cedc947e..4de6d15d 100644 --- a/internal/internal_unix.go +++ b/internal/internal_unix.go @@ -1,4 +1,5 @@ -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build !windows +// +build !windows package internal @@ -15,7 +16,3 @@ func Fileinfo(fi os.FileInfo) (uid, gid int) { stat := fi.Sys().(*syscall.Stat_t) return int(stat.Uid), int(stat.Gid) } - -func fixRootDirectory(p string) string { - return p -} diff --git a/internal/internal_windows.go 
b/internal/internal_windows.go index 18531642..c9810326 100644 --- a/internal/internal_windows.go +++ b/internal/internal_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal @@ -10,13 +11,3 @@ import ( func Fileinfo(fi os.FileInfo) (uid, gid int) { return -1, -1 } - -// fixRootDirectory is copied from the standard library for use with mkdirAll() -func fixRootDirectory(p string) string { - if len(p) == len(`\\?\c:`) { - if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { - return p + `\` - } - } - return p -} diff --git a/internal/locking_buffer.go b/internal/locking_buffer.go new file mode 100644 index 00000000..5a95df92 --- /dev/null +++ b/internal/locking_buffer.go @@ -0,0 +1,145 @@ +package internal + +import ( + "bytes" + "io" + "sync" +) + +// LockingBuffer wraps a bytes.Buffer with a mutex. +type LockingBuffer struct { + mu sync.Mutex + b bytes.Buffer +} + +func (b *LockingBuffer) Bytes() []byte { + b.mu.Lock() + defer b.mu.Unlock() + buf := b.b.Bytes() + other := make([]byte, len(buf)) + copy(other, buf) + return other +} + +func (b *LockingBuffer) Cap() int { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Cap() +} + +func (b *LockingBuffer) Grow(n int) { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Grow(n) +} + +func (b *LockingBuffer) Len() int { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Len() +} + +func (b *LockingBuffer) Next(n int) []byte { + b.mu.Lock() + defer b.mu.Unlock() + buf := b.b.Next(n) + other := make([]byte, len(buf)) + copy(other, buf) + return other +} + +func (b *LockingBuffer) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Read(p) +} + +func (b *LockingBuffer) ReadByte() (byte, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadByte() +} + +func (b *LockingBuffer) ReadBytes(delim byte) (line []byte, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadBytes(delim) +} + +func (b 
*LockingBuffer) ReadFrom(r io.Reader) (n int64, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadFrom(r) +} + +func (b *LockingBuffer) ReadRune() (r rune, size int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadRune() +} + +func (b *LockingBuffer) ReadString(delim byte) (line string, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.ReadString(delim) +} + +func (b *LockingBuffer) Reset() { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Reset() +} + +func (b *LockingBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.String() +} + +func (b *LockingBuffer) Truncate(n int) { + b.mu.Lock() + defer b.mu.Unlock() + b.b.Truncate(n) +} + +func (b *LockingBuffer) UnreadByte() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.UnreadByte() +} + +func (b *LockingBuffer) UnreadRune() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.UnreadRune() +} + +func (b *LockingBuffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.Write(p) +} + +func (b *LockingBuffer) WriteByte(c byte) error { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteByte(c) +} + +func (b *LockingBuffer) WriteRune(r rune) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteRune(r) +} + +func (b *LockingBuffer) WriteString(s string) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteString(s) +} + +func (b *LockingBuffer) WriteTo(w io.Writer) (n int64, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.b.WriteTo(w) +} diff --git a/internal/testingutil/testingutil.go b/internal/testingutil/testingutil.go new file mode 100644 index 00000000..bcf60dc3 --- /dev/null +++ b/internal/testingutil/testingutil.go @@ -0,0 +1,90 @@ +package testingutil + +import ( + "bytes" + "io" + "os" + "testing" + + "github.com/pierrec/lz4/v4" +) + +// ReadFile reads all data from filename. Fail on error. 
+func ReadFile(tb testing.TB, filename string) []byte { + tb.Helper() + b, err := os.ReadFile(filename) + if err != nil { + tb.Fatal(err) + } + return b +} + +// CopyFile copies all data from src to dst. Fail on error. +func CopyFile(tb testing.TB, src, dst string) { + tb.Helper() + r, err := os.Open(src) + if err != nil { + tb.Fatal(err) + } + defer r.Close() + + w, err := os.Create(dst) + if err != nil { + tb.Fatal(err) + } + defer w.Close() + + if _, err := io.Copy(w, r); err != nil { + tb.Fatal(err) + } +} + +// Getpwd returns the working directory. Fail on error. +func Getwd(tb testing.TB) string { + tb.Helper() + + dir, err := os.Getwd() + if err != nil { + tb.Fatal(err) + } + return dir +} + +// Setenv sets the environment variable key to value. The returned function reverts it. +func Setenv(tb testing.TB, key, value string) func() { + tb.Helper() + + prevValue := os.Getenv(key) + if err := os.Setenv(key, value); err != nil { + tb.Fatal(err) + } + + return func() { + if err := os.Setenv(key, prevValue); err != nil { + tb.Fatal(tb) + } + } +} + +func CompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(b); err != nil { + tb.Fatal(err) + } else if err := zw.Close(); err != nil { + tb.Fatal(err) + } + return buf.Bytes() +} + +func DecompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + buf, err := io.ReadAll(lz4.NewReader(bytes.NewReader(b))) + if err != nil { + tb.Fatal(err) + } + return buf +} diff --git a/litestream.go b/litestream.go index f31985b6..6cf0b9b2 100644 --- a/litestream.go +++ b/litestream.go @@ -6,13 +6,16 @@ import ( "errors" "fmt" "io" + "math" "os" - "path" "path/filepath" "regexp" "strconv" "strings" "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/mattn/go-sqlite3" ) // Naming constants. @@ -37,18 +40,40 @@ const ( // Litestream errors. 
var ( - ErrNoGeneration = errors.New("no generation available") - ErrNoSnapshots = errors.New("no snapshots available") - ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") + ErrDBClosed = errors.New("database closed") + ErrNoGeneration = errors.New("no generation available") + ErrGenerationChanged = errors.New("generation changed") + ErrNoSnapshots = errors.New("no snapshots available") + ErrNoWALSegments = errors.New("no wal segments available") + ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") +) + +var ( + // LogWriter is the destination writer for all logging. + LogWriter = os.Stdout + + // LogFlags are the flags passed to log.New(). + LogFlags = 0 ) +func init() { + sql.Register("litestream-sqlite3", &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + if err := conn.SetFileControlInt("main", sqlite3.SQLITE_FCNTL_PERSIST_WAL, 1); err != nil { + return fmt.Errorf("cannot set file control: %w", err) + } + return nil + }, + }) +} + // SnapshotIterator represents an iterator over a collection of snapshot metadata. type SnapshotIterator interface { io.Closer - // Prepares the the next snapshot for reading with the Snapshot() method. + // Prepares the next snapshot for reading with the Snapshot() method. // Returns true if another snapshot is available. Returns false if no more - // snapshots are available or if an error occured. + // snapshots are available or if an error occurred. Next() bool // Returns an error that occurred during iteration. @@ -108,9 +133,9 @@ func (itr *SnapshotInfoSliceIterator) Snapshot() SnapshotInfo { type WALSegmentIterator interface { io.Closer - // Prepares the the next WAL for reading with the WAL() method. + // Prepares the next WAL for reading with the WAL() method. // Returns true if another WAL is available. Returns false if no more - // WAL files are available or if an error occured. + // WAL files are available or if an error occurred. 
Next() bool // Returns an error that occurred during iteration. @@ -166,6 +191,49 @@ func (itr *WALSegmentInfoSliceIterator) WALSegment() WALSegmentInfo { return itr.a[0] } +type BufferedWALSegmentIterator struct { + itr WALSegmentIterator + buffered bool +} + +// NewBufferedWALSegmentIterator returns a new instance of BufferedWALSegmentIterator. +func NewBufferedWALSegmentIterator(itr WALSegmentIterator) *BufferedWALSegmentIterator { + return &BufferedWALSegmentIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *BufferedWALSegmentIterator) Close() error { + return itr.itr.Close() +} + +// Peek returns the next segment without moving the iterator forward. +func (itr *BufferedWALSegmentIterator) Peek() (info WALSegmentInfo, ok bool) { + if !itr.Next() { + return WALSegmentInfo{}, false + } + itr.buffered = true + return itr.itr.WALSegment(), true +} + +// Next returns the next segment. If buffer is full, this call is a no-op. +func (itr *BufferedWALSegmentIterator) Next() bool { + if itr.buffered { + itr.buffered = false + return true + } + return itr.itr.Next() +} + +// Returns an error that occurred during iteration. +func (itr *BufferedWALSegmentIterator) Err() error { + return itr.itr.Err() +} + +// Returns metadata for the currently positioned WAL segment file. +func (itr *BufferedWALSegmentIterator) WALSegment() WALSegmentInfo { + return itr.itr.WALSegment() +} + // SnapshotInfo represents file information about a snapshot. type SnapshotInfo struct { Generation string @@ -207,11 +275,13 @@ func FilterSnapshotsAfter(a []SnapshotInfo, t time.Time) []SnapshotInfo { // FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation. 
func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo { var min *SnapshotInfo - for _, snapshot := range a { + for i := range a { + snapshot := &a[i] + if snapshot.Generation != generation { continue } else if min == nil || snapshot.Index < min.Index { - min = &snapshot + min = snapshot } } return min @@ -275,12 +345,38 @@ type Pos struct { Offset int64 // offset within wal file } +// ParsePos parses a position generated by Pos.String(). +func ParsePos(s string) (Pos, error) { + a := posRegex.FindStringSubmatch(s) + if a == nil { + return Pos{}, fmt.Errorf("invalid pos: %q", s) + } + + index, err := ParseIndex(a[2]) + if err != nil { + return Pos{}, err + } + + offset, err := ParseOffset(a[3]) + if err != nil { + return Pos{}, err + } + + return Pos{ + Generation: a[1], + Index: index, + Offset: offset, + }, nil +} + +var posRegex = regexp.MustCompile(`^(\w+)/(\w+):(\w+)$`) + // String returns a string representation. func (p Pos) String() string { if p.IsZero() { return "" } - return fmt.Sprintf("%s/%08x:%d", p.Generation, p.Index, p.Offset) + return fmt.Sprintf("%s/%s:%s", p.Generation, FormatIndex(p.Index), FormatOffset(p.Offset)) } // IsZero returns true if p is the zero value. @@ -293,6 +389,26 @@ func (p Pos) Truncate() Pos { return Pos{Generation: p.Generation, Index: p.Index} } +// ComparePos returns -1 if a is less than b, 1 if a is greater than b, and +// returns 0 if a and b are equal. Only index & offset are compared. +// Returns an error if generations are not equal. +func ComparePos(a, b Pos) (int, error) { + if a.Generation != b.Generation { + return 0, fmt.Errorf("generation mismatch") + } + + if a.Index < b.Index { + return -1, nil + } else if a.Index > b.Index { + return 1, nil + } else if a.Offset < b.Offset { + return -1, nil + } else if a.Offset > b.Offset { + return 1, nil + } + return 0, nil +} + // Checksum computes a running SQLite checksum over a byte slice. 
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) { assert(len(b)%8 == 0, "misaligned checksum byte slice") @@ -311,6 +427,9 @@ const ( // WALFrameHeaderSize is the size of the WAL frame header, in bytes. WALFrameHeaderSize = 24 + + // WALIndexHeaderSize is the size of the SHM index header, in bytes. + WALIndexHeaderSize = 136 ) // calcWALSize returns the size of the WAL, in bytes, for a given number of pages. @@ -384,134 +503,52 @@ func IsGenerationName(s string) bool { return true } -// GenerationsPath returns the path to a generation root directory. -func GenerationsPath(root string) string { - return path.Join(root, "generations") -} - -// GenerationPath returns the path to a generation's root directory. -func GenerationPath(root, generation string) (string, error) { - dir := GenerationsPath(root) - if generation == "" { - return "", fmt.Errorf("generation required") - } - return path.Join(dir, generation), nil +// FormatIndex formats an index as a hex value. +func FormatIndex(index int) string { + return fmt.Sprintf("%016x", index) } -// SnapshotsPath returns the path to a generation's snapshot directory. -func SnapshotsPath(root, generation string) (string, error) { - dir, err := GenerationPath(root, generation) +// ParseIndex parses a hex-formatted index into an integer. +func ParseIndex(s string) (int, error) { + v, err := strconv.ParseUint(s, 16, 64) if err != nil { - return "", err + return -1, fmt.Errorf("cannot parse index: %q", s) + } else if v > uint64(internal.MaxInt) { + return -1, fmt.Errorf("index too large: %q", s) } - return path.Join(dir, "snapshots"), nil + return int(v), nil } -// SnapshotPath returns the path to an uncompressed snapshot file. 
-func SnapshotPath(root, generation string, index int) (string, error) { - dir, err := SnapshotsPath(root, generation) - if err != nil { - return "", err - } - return path.Join(dir, FormatSnapshotPath(index)), nil -} - -// WALPath returns the path to a generation's WAL directory -func WALPath(root, generation string) (string, error) { - dir, err := GenerationPath(root, generation) - if err != nil { - return "", err - } - return path.Join(dir, "wal"), nil +// FormatOffset formats an offset as a hex value. +func FormatOffset(offset int64) string { + return fmt.Sprintf("%016x", offset) } -// WALSegmentPath returns the path to a WAL segment file. -func WALSegmentPath(root, generation string, index int, offset int64) (string, error) { - dir, err := WALPath(root, generation) +// ParseOffset parses a hex-formatted offset into an integer. +func ParseOffset(s string) (int64, error) { + v, err := strconv.ParseUint(s, 16, 64) if err != nil { - return "", err - } - return path.Join(dir, FormatWALSegmentPath(index, offset)), nil -} - -// IsSnapshotPath returns true if s is a path to a snapshot file. -func IsSnapshotPath(s string) bool { - return snapshotPathRegex.MatchString(s) -} - -// ParseSnapshotPath returns the index for the snapshot. -// Returns an error if the path is not a valid snapshot path. -func ParseSnapshotPath(s string) (index int, err error) { - s = filepath.Base(s) - - a := snapshotPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, fmt.Errorf("invalid snapshot path: %s", s) - } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil -} - -// FormatSnapshotPath formats a snapshot filename with a given index. -func FormatSnapshotPath(index int) string { - assert(index >= 0, "snapshot index must be non-negative") - return fmt.Sprintf("%08x%s", index, SnapshotExt) -} - -var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`) - -// IsWALPath returns true if s is a path to a WAL file. 
-func IsWALPath(s string) bool { - return walPathRegex.MatchString(s) -} - -// ParseWALPath returns the index for the WAL file. -// Returns an error if the path is not a valid WAL path. -func ParseWALPath(s string) (index int, err error) { - s = filepath.Base(s) - - a := walPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, fmt.Errorf("invalid wal path: %s", s) + return -1, fmt.Errorf("cannot parse offset: %q", s) + } else if v > math.MaxInt64 { + return -1, fmt.Errorf("offset too large: %q", s) } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - return int(i64), nil -} - -// FormatWALPath formats a WAL filename with a given index. -func FormatWALPath(index int) string { - assert(index >= 0, "wal index must be non-negative") - return fmt.Sprintf("%08x%s", index, WALExt) -} - -var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) - -// ParseWALSegmentPath returns the index & offset for the WAL segment file. -// Returns an error if the path is not a valid wal segment path. -func ParseWALSegmentPath(s string) (index int, offset int64, err error) { - s = filepath.Base(s) - - a := walSegmentPathRegex.FindStringSubmatch(s) - if a == nil { - return 0, 0, fmt.Errorf("invalid wal segment path: %s", s) + return int64(v), nil +} + +// removeDBFiles deletes the database and related files (journal, shm, wal). 
+func removeDBFiles(filename string) error { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete database %q: %w", filename, err) + } else if err := os.Remove(filename + "-journal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete journal for %q: %w", filename, err) + } else if err := os.Remove(filename + "-shm"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete shared memory for %q: %w", filename, err) + } else if err := os.Remove(filename + "-wal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete wal for %q: %w", filename, err) } - - i64, _ := strconv.ParseUint(a[1], 16, 64) - off64, _ := strconv.ParseUint(a[2], 16, 64) - return int(i64), int64(off64), nil -} - -// FormatWALSegmentPath formats a WAL segment filename with a given index & offset. -func FormatWALSegmentPath(index int, offset int64) string { - assert(index >= 0, "wal index must be non-negative") - assert(offset >= 0, "wal offset must be non-negative") - return fmt.Sprintf("%08x_%08x%s", index, offset, WALSegmentExt) + return nil } -var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))\.wal\.lz4$`) - // isHexChar returns true if ch is a lowercase hex character. func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') diff --git a/litestream_test.go b/litestream_test.go index 0f1bb859..860b6588 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -1,8 +1,10 @@ package litestream_test import ( + "bytes" "encoding/binary" "encoding/hex" + "os" "testing" "github.com/benbjohnson/litestream" @@ -26,112 +28,125 @@ func TestChecksum(t *testing.T) { // Ensure we get the same result as OnePass even if we split up into multiple calls. t.Run("Incremental", func(t *testing.T) { // Compute checksum for beginning of WAL header. 
- s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, MustDecodeHexString("377f0682002de218000010000000000052382eac857b1a4e")) + s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, decodeHexString(t, "377f0682002de218000010000000000052382eac857b1a4e")) if got, want := [2]uint32{s0, s1}, [2]uint32{0x81153b65, 0x87178e8f}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } // Continue checksum with WAL frame header & frame contents. - s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, MustDecodeHexString("0000000200000002")) - s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, MustDecodeHexString(`0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) + s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, decodeHexString(t, "0000000200000002")) + s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, decodeHexString(t, 
`0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) if got, want := [2]uint32{s0b, s1b}, [2]uint32{0xdc2f3e84, 0x540488d3}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } }) } -func TestGenerationsPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, want := litestream.GenerationsPath("foo"), "foo/generations"; got != want { - t.Fatalf("GenerationsPath()=%v, want %v", got, want) - } - }) - t.Run("NoPath", func(t *testing.T) { - if got, want := litestream.GenerationsPath(""), "generations"; got != want { - t.Fatalf("GenerationsPath()=%v, want %v", got, want) - } - }) +func TestFindMinSnapshotByGeneration(t *testing.T) { + infos := []litestream.SnapshotInfo{ + {Generation: "29cf4bced74e92ab", Index: 0}, + {Generation: "5dfeb4aa03232553", Index: 24}, + } + if got, want := litestream.FindMinSnapshotByGeneration(infos, "29cf4bced74e92ab"), &infos[0]; got != want { + t.Fatalf("info=%#v, want %#v", got, want) + } } -func TestGenerationPath(t *testing.T) { +func TestBufferedWALSegmentIterator(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := litestream.GenerationPath("foo", "0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567"; got != want { - t.Fatalf("GenerationPath()=%v, want %v", got, want) - } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.GenerationPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("expected error: %v", err) + a := []litestream.WALSegmentInfo{{Index: 1}, {Index: 2}} + itr := litestream.NewBufferedWALSegmentIterator(litestream.NewWALSegmentInfoSliceIterator(a)) + + if info, ok := itr.Peek(); !ok { + t.Fatal("expected info") + } else if got, want := info.Index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) } - }) -} -func TestSnapshotsPath(t 
*testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.SnapshotsPath("foo", "0123456701234567"); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/snapshots"; got != want { - t.Fatalf("SnapshotsPath()=%v, want %v", got, want) + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment().Index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) } - }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.SnapshotsPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + if !itr.Next() { + t.Fatal("expected next") + } else if got, want := itr.WALSegment().Index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) } - }) -} -func TestSnapshotPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.SnapshotPath("foo", "0123456701234567", 1000); err != nil { - t.Fatal(err) - } else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want { - t.Fatalf("SnapshotPath()=%v, want %v", got, want) + if itr.Next() { + t.Fatal("expected eof") } }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.SnapshotPath("foo", "", 1000); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + t.Run("Empty", func(t *testing.T) { + itr := litestream.NewBufferedWALSegmentIterator(litestream.NewWALSegmentInfoSliceIterator(nil)) + + if info, ok := itr.Peek(); ok { + t.Fatal("expected eof") + } else if got, want := info.Index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) } -func TestWALPath(t *testing.T) { +func TestParsePos(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := litestream.WALPath("foo", "0123456701234567"); err != nil { + if pos, err := litestream.ParsePos("29cf4bced74e92ab/00000000000003e8:00000000000007d0"); err != nil { t.Fatal(err) - } else if want 
:= "foo/generations/0123456701234567/wal"; got != want { - t.Fatalf("WALPath()=%v, want %v", got, want) + } else if got, want := pos.Generation, "29cf4bced74e92ab"; got != want { + t.Fatalf("generation=%s, want %s", got, want) + } else if got, want := pos.Index, 1000; got != want { + t.Fatalf("index=%v, want %v", got, want) + } else if got, want := pos.Offset, 2000; got != int64(want) { + t.Fatalf("offset=%v, want %v", got, want) } }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.WALPath("foo", ""); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + t.Run("ErrMismatch", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab-00000000000003e8-00000000000007d0") + if err == nil || err.Error() != `invalid pos: "29cf4bced74e92ab-00000000000003e8-00000000000007d0"` { + t.Fatal(err) } }) -} - -func TestWALSegmentPath(t *testing.T) { - t.Run("OK", func(t *testing.T) { - if got, err := litestream.WALSegmentPath("foo", "0123456701234567", 1000, 1001); err != nil { + t.Run("ErrInvalidIndex", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab/0000000000000xxx:00000000000007d0") + if err == nil || err.Error() != `cannot parse index: "0000000000000xxx"` { t.Fatal(err) - } else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want { - t.Fatalf("WALPath()=%v, want %v", got, want) } }) - t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := litestream.WALSegmentPath("foo", "", 1000, 0); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrInvalidIndex", func(t *testing.T) { + _, err := litestream.ParsePos("29cf4bced74e92ab/00000000000003e8:0000000000000xxx") + if err == nil || err.Error() != `cannot parse offset: "0000000000000xxx"` { + t.Fatal(err) } }) } -func MustDecodeHexString(s string) []byte { +func decodeHexString(tb testing.TB, s string) []byte { + tb.Helper() + b, err := 
hex.DecodeString(s) if err != nil { - panic(err) + tb.Fatal(err) } return b } + +// fileEqual returns true if files at x and y have equal contents. +func fileEqual(tb testing.TB, x, y string) bool { + tb.Helper() + + bx, err := os.ReadFile(x) + if err != nil { + tb.Fatal(err) + } + + by, err := os.ReadFile(y) + if err != nil { + tb.Fatal(err) + } + + return bytes.Equal(bx, by) +} diff --git a/mock/read_closer.go b/mock/read_closer.go new file mode 100644 index 00000000..a473e96e --- /dev/null +++ b/mock/read_closer.go @@ -0,0 +1,14 @@ +package mock + +type ReadCloser struct { + CloseFunc func() error + ReadFunc func([]byte) (int, error) +} + +func (r *ReadCloser) Close() error { + return r.CloseFunc() +} + +func (r *ReadCloser) Read(b []byte) (int, error) { + return r.ReadFunc(b) +} diff --git a/mock/snapshot_iterator.go b/mock/snapshot_iterator.go new file mode 100644 index 00000000..8f167d68 --- /dev/null +++ b/mock/snapshot_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type SnapshotIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + SnapshotFunc func() litestream.SnapshotInfo +} + +func (itr *SnapshotIterator) Close() error { + return itr.CloseFunc() +} + +func (itr *SnapshotIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *SnapshotIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *SnapshotIterator) Snapshot() litestream.SnapshotInfo { + return itr.SnapshotFunc() +} diff --git a/mock/wal_segment_iterator.go b/mock/wal_segment_iterator.go new file mode 100644 index 00000000..f1d62cd3 --- /dev/null +++ b/mock/wal_segment_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type WALSegmentIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + WALSegmentFunc func() litestream.WALSegmentInfo +} + +func (itr *WALSegmentIterator) Close() error { + return itr.CloseFunc() 
+} + +func (itr *WALSegmentIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *WALSegmentIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *WALSegmentIterator) WALSegment() litestream.WALSegmentInfo { + return itr.WALSegmentFunc() +} diff --git a/replica.go b/replica.go index 0cfc21dc..245565fe 100644 --- a/replica.go +++ b/replica.go @@ -2,20 +2,15 @@ package litestream import ( "context" - "encoding/binary" "fmt" - "hash/crc64" "io" "io/ioutil" "log" - "math" "os" - "path/filepath" "sort" "sync" "time" - "github.com/benbjohnson/litestream/internal" "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -38,6 +33,7 @@ type Replica struct { mu sync.RWMutex pos Pos // current replicated position + itr *FileWALSegmentIterator muf sync.Mutex f *os.File // long-running file descriptor to avoid non-OFD lock issues @@ -46,7 +42,7 @@ type Replica struct { cancel func() // Client used to connect to the remote replica. - Client ReplicaClient + client ReplicaClient // Time between syncs with the shadow WAL. SyncInterval time.Duration @@ -67,12 +63,15 @@ type Replica struct { // If true, replica monitors database for changes automatically. // Set to false if replica is being used synchronously (such as in tests). MonitorEnabled bool + + Logger *log.Logger } -func NewReplica(db *DB, name string) *Replica { +func NewReplica(db *DB, name string, client ReplicaClient) *Replica { r := &Replica{ db: db, name: name, + client: client, cancel: func() {}, SyncInterval: DefaultSyncInterval, @@ -81,13 +80,19 @@ func NewReplica(db *DB, name string) *Replica { MonitorEnabled: true, } + prefix := fmt.Sprintf("%s: ", r.Name()) + if db != nil { + prefix = fmt.Sprintf("%s(%s): ", logPrefixPath(db.Path()), r.Name()) + } + r.Logger = log.New(LogWriter, prefix, LogFlags) + return r } // Name returns the name of the replica. 
func (r *Replica) Name() string { - if r.name == "" && r.Client != nil { - return r.Client.Type() + if r.name == "" && r.client != nil { + return r.client.Type() } return r.name } @@ -95,41 +100,46 @@ func (r *Replica) Name() string { // DB returns a reference to the database the replica is attached to, if any. func (r *Replica) DB() *DB { return r.db } +// Client returns the client the replica was initialized with. +func (r *Replica) Client() ReplicaClient { return r.client } + // Starts replicating in a background goroutine. -func (r *Replica) Start(ctx context.Context) error { - // Ignore if replica is being used sychronously. +func (r *Replica) Start(ctx context.Context) { + // Ignore if replica is being used synchronously. if !r.MonitorEnabled { - return nil + return } // Stop previous replication. - r.Stop(false) + r.Stop() // Wrap context with cancelation. ctx, r.cancel = context.WithCancel(ctx) // Start goroutine to replicate data. - r.wg.Add(4) + r.wg.Add(3) go func() { defer r.wg.Done(); r.monitor(ctx) }() go func() { defer r.wg.Done(); r.retainer(ctx) }() go func() { defer r.wg.Done(); r.snapshotter(ctx) }() - go func() { defer r.wg.Done(); r.validator(ctx) }() - - return nil } // Stop cancels any outstanding replication and blocks until finished. -// -// Performing a hard stop will close the DB file descriptor which could release -// locks on per-process locks. Hard stops should only be performed when -// stopping the entire process. -func (r *Replica) Stop(hard bool) (err error) { +func (r *Replica) Stop() { r.cancel() r.wg.Wait() + if r.itr != nil { + r.itr.Close() + r.itr = nil + } +} + +// Close will close the DB file descriptor which could release locks on +// per-process locks (e.g. non-Linux OSes). 
+func (r *Replica) Close() (err error) { r.muf.Lock() defer r.muf.Unlock() - if hard && r.f != nil { + if r.f != nil { if e := r.f.Close(); e != nil && err == nil { err = e } @@ -149,15 +159,25 @@ func (r *Replica) Sync(ctx context.Context) (err error) { }() // Find current position of database. - dpos, err := r.db.Pos() - if err != nil { - return fmt.Errorf("cannot determine current generation: %w", err) - } else if dpos.IsZero() { - return fmt.Errorf("no generation, waiting for data") + dpos := r.db.Pos() + if dpos.IsZero() { + return ErrNoGeneration } generation := dpos.Generation - Tracef("%s(%s): replica sync: db.pos=%s", r.db.Path(), r.Name(), dpos) + // Close out iterator if the generation has changed. + if r.itr != nil && r.itr.Generation() != generation { + _ = r.itr.Close() + r.itr = nil + } + + // Ensure we obtain a WAL iterator before we snapshot so we don't miss any segments. + resetItr := r.itr == nil + if resetItr { + if r.itr, err = r.db.WALSegments(ctx, generation); err != nil { + return fmt.Errorf("wal segments: %w", err) + } + } // Create snapshot if no snapshots exist for generation. snapshotN, err := r.snapshotN(generation) @@ -174,130 +194,150 @@ func (r *Replica) Sync(ctx context.Context) (err error) { replicaSnapshotTotalGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(snapshotN)) // Determine position, if necessary. - if r.Pos().Generation != generation { + if resetItr { pos, err := r.calcPos(ctx, generation) if err != nil { return fmt.Errorf("cannot determine replica position: %s", err) } - Tracef("%s(%s): replica sync: calc new pos: %s", r.db.Path(), r.Name(), pos) r.mu.Lock() r.pos = pos r.mu.Unlock() } // Read all WAL files since the last position. 
- for { - if err = r.syncWAL(ctx); err == io.EOF { - break - } else if err != nil { - return err - } + if err = r.syncWAL(ctx); err != nil { + return err } return nil } func (r *Replica) syncWAL(ctx context.Context) (err error) { - rd, err := r.db.ShadowWALReader(r.Pos()) - if err == io.EOF { - return err - } else if err != nil { - return fmt.Errorf("replica wal reader: %w", err) + pos := r.Pos() + + // Group segments by index. + var segments [][]WALSegmentInfo + for r.itr.Next() { + info := r.itr.WALSegment() + + if cmp, err := ComparePos(pos, info.Pos()); err != nil { + return fmt.Errorf("compare pos: %w", err) + } else if cmp == 1 { + continue // already processed, skip + } + + // Start a new chunk if index has changed. + if len(segments) == 0 || segments[len(segments)-1][0].Index != info.Index { + segments = append(segments, []WALSegmentInfo{info}) + continue + } + + // Add segment to the end of the current index, if matching. + segments[len(segments)-1] = append(segments[len(segments)-1], info) } - defer rd.Close() + + // Write out segments to replica by index so they can be combined. + for i := range segments { + if err := r.writeIndexSegments(ctx, segments[i]); err != nil { + return fmt.Errorf("write index segments: index=%d err=%w", segments[i][0].Index, err) + } + } + + return nil +} + +func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentInfo) (err error) { + assert(len(segments) > 0, "segments required for replication") + + // First segment position must be equal to last replica position or + // the start of the next index. + if pos := r.Pos(); pos != segments[0].Pos() { + nextIndexPos := pos.Truncate() + nextIndexPos.Index++ + if nextIndexPos != segments[0].Pos() { + return fmt.Errorf("replica skipped position: replica=%s initial=%s", pos, segments[0].Pos()) + } + } + + pos := segments[0].Pos() + initialPos := pos // Copy shadow WAL to client write via io.Pipe(). 
pr, pw := io.Pipe() defer func() { _ = pw.CloseWithError(err) }() - // Obtain initial position from shadow reader. - // It may have moved to the next index if previous position was at the end. - pos := rd.Pos() - // Copy through pipe into client from the starting position. var g errgroup.Group g.Go(func() error { - _, err := r.Client.WriteWALSegment(ctx, pos, pr) + _, err := r.client.WriteWALSegment(ctx, initialPos, pr) return err }) // Wrap writer to LZ4 compress. zw := lz4.NewWriter(pw) - // Track total WAL bytes written to replica client. - walBytesCounter := replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()) - - // Copy header if at offset zero. - var psalt uint64 // previous salt value - if pos := rd.Pos(); pos.Offset == 0 { - buf := make([]byte, WALHeaderSize) - if _, err := io.ReadFull(rd, buf); err != nil { - return err - } + // Write each segment out to the replica. + for i := range segments { + info := &segments[i] - psalt = binary.BigEndian.Uint64(buf[16:24]) + if err := func() error { + // Ensure segments are in order and no bytes are skipped. + if pos != info.Pos() { + return fmt.Errorf("non-contiguous segment: expected=%s current=%s", pos, info.Pos()) + } - n, err := zw.Write(buf) - if err != nil { - return err - } - walBytesCounter.Add(float64(n)) - } + rc, err := r.db.WALSegmentReader(ctx, info.Pos()) + if err != nil { + return err + } + defer rc.Close() - // Copy frames. - for { - pos := rd.Pos() - assert(pos.Offset == frameAlign(pos.Offset, r.db.pageSize), "shadow wal reader not frame aligned") + n, err := io.Copy(zw, lz4.NewReader(rc)) + if err != nil { + return err + } else if err := rc.Close(); err != nil { + return err + } - buf := make([]byte, WALFrameHeaderSize+r.db.pageSize) - if _, err := io.ReadFull(rd, buf); err == io.EOF { - break - } else if err != nil { - return err - } + // Track last position written. + pos = info.Pos() + pos.Offset += n - // Verify salt matches the previous frame/header read. 
- salt := binary.BigEndian.Uint64(buf[8:16]) - if psalt != 0 && psalt != salt { - return fmt.Errorf("replica salt mismatch: %s", pos.String()) - } - psalt = salt - - n, err := zw.Write(buf) - if err != nil { - return err + return nil + }(); err != nil { + return fmt.Errorf("wal segment: pos=%s err=%w", info.Pos(), err) } - walBytesCounter.Add(float64(n)) } - // Flush LZ4 writer and close pipe. + // Flush LZ4 writer, close pipe, and wait for write to finish. if err := zw.Close(); err != nil { - return err + return fmt.Errorf("lz4 writer close: %w", err) } else if err := pw.Close(); err != nil { + return fmt.Errorf("pipe writer close: %w", err) + } else if err := g.Wait(); err != nil { return err } - // Wait for client to finish write. - if err := g.Wait(); err != nil { - return fmt.Errorf("client write: %w", err) - } - // Save last replicated position. r.mu.Lock() - r.pos = rd.Pos() + r.pos = pos r.mu.Unlock() - // Track current position - replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Index)) - replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Offset)) + replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()).Add(float64(pos.Offset - initialPos.Offset)) + + // Track total WAL bytes written to replica client. + replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Index)) + replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Offset)) + + r.Logger.Printf("wal segment written: %s sz=%d", initialPos, pos.Offset-initialPos.Offset) return nil } // snapshotN returns the number of snapshots for a generation. 
func (r *Replica) snapshotN(generation string) (int, error) { - itr, err := r.Client.Snapshots(context.Background(), generation) + itr, err := r.client.Snapshots(context.Background(), generation) if err != nil { return 0, err } @@ -329,7 +369,7 @@ func (r *Replica) calcPos(ctx context.Context, generation string) (pos Pos, err } // Read segment to determine size to add to offset. - rd, err := r.Client.WALSegmentReader(ctx, segment.Pos()) + rd, err := r.client.WALSegmentReader(ctx, segment.Pos()) if err != nil { return pos, fmt.Errorf("wal segment reader: %w", err) } @@ -350,7 +390,7 @@ func (r *Replica) calcPos(ctx context.Context, generation string) (pos Pos, err // maxSnapshot returns the last snapshot in a generation. func (r *Replica) maxSnapshot(ctx context.Context, generation string) (*SnapshotInfo, error) { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return nil, err } @@ -367,7 +407,7 @@ func (r *Replica) maxSnapshot(ctx context.Context, generation string) (*Snapshot // maxWALSegment returns the highest WAL segment in a generation. func (r *Replica) maxWALSegment(ctx context.Context, generation string) (*WALSegmentInfo, error) { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.client.WALSegments(ctx, generation) if err != nil { return nil, err } @@ -392,7 +432,7 @@ func (r *Replica) Pos() Pos { // Snapshots returns a list of all snapshots across all generations. 
func (r *Replica) Snapshots(ctx context.Context) ([]SnapshotInfo, error) { - generations, err := r.Client.Generations(ctx) + generations, err := r.client.Generations(ctx) if err != nil { return nil, fmt.Errorf("cannot fetch generations: %w", err) } @@ -400,7 +440,7 @@ func (r *Replica) Snapshots(ctx context.Context) ([]SnapshotInfo, error) { var a []SnapshotInfo for _, generation := range generations { if err := func() error { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return err } @@ -448,10 +488,8 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { defer func() { _ = tx.Rollback() }() // Obtain current position. - pos, err := r.db.Pos() - if err != nil { - return info, fmt.Errorf("cannot determine db position: %w", err) - } else if pos.IsZero() { + pos := r.db.Pos() + if pos.IsZero() { return info, ErrNoGeneration } @@ -475,23 +513,23 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) { defer zr.Close() if _, err := io.Copy(zr, r.f); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return err } else if err := zr.Close(); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return err } return pw.Close() }) // Delegate write to client & wait for writer goroutine to finish. - if info, err = r.Client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil { + if info, err = r.client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil { return info, err } else if err := g.Wait(); err != nil { return info, err } - log.Printf("%s(%s): snapshot written %s/%08x", r.db.Path(), r.Name(), pos.Generation, pos.Index) + r.Logger.Printf("snapshot written %s/%s", pos.Generation, FormatIndex(pos.Index)) return info, nil } @@ -516,7 +554,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { } // Loop over generations and delete unretained snapshots & WAL files. 
- generations, err := r.Client.Generations(ctx) + generations, err := r.client.Generations(ctx) if err != nil { return fmt.Errorf("generations: %w", err) } @@ -526,7 +564,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { // Delete entire generation if no snapshots are being retained. if snapshot == nil { - if err := r.Client.DeleteGeneration(ctx, generation); err != nil { + if err := r.client.DeleteGeneration(ctx, generation); err != nil { return fmt.Errorf("delete generation: %w", err) } continue @@ -544,7 +582,7 @@ func (r *Replica) EnforceRetention(ctx context.Context) (err error) { } func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation string, index int) error { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return fmt.Errorf("fetch snapshots: %w", err) } @@ -556,17 +594,17 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str continue } - if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { - return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err) + if err := r.client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { + return fmt.Errorf("delete snapshot %s/%s: %w", info.Generation, FormatIndex(info.Index), err) } - log.Printf("%s(%s): snapshot deleted %s/%08x", r.db.Path(), r.Name(), generation, index) + r.Logger.Printf("snapshot deleted %s/%s", generation, FormatIndex(index)) } return itr.Close() } func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation string, index int) error { - itr, err := r.Client.WALSegments(ctx, generation) + itr, err := r.client.WALSegments(ctx, generation) if err != nil { return fmt.Errorf("fetch wal segments: %w", err) } @@ -588,48 +626,52 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s return nil } - if err := r.Client.DeleteWALSegments(ctx, a); err != nil { + 
if err := r.client.DeleteWALSegments(ctx, a); err != nil { return fmt.Errorf("delete wal segments: %w", err) } - log.Printf("%s(%s): wal segmented deleted before %s/%08x: n=%d", r.db.Path(), r.Name(), generation, index, len(a)) + + for _, pos := range a { + r.Logger.Printf("wal segmented deleted: %s", pos) + } return nil } // monitor runs in a separate goroutine and continuously replicates the DB. func (r *Replica) monitor(ctx context.Context) { - ticker := time.NewTicker(r.SyncInterval) - defer ticker.Stop() + timer := time.NewTimer(r.SyncInterval) + defer timer.Stop() - // Continuously check for new data to replicate. - ch := make(chan struct{}) - close(ch) - var notify <-chan struct{} = ch + for { + if err := r.Sync(ctx); ctx.Err() != nil { + return + } else if err != nil && err != ErrNoGeneration { + r.Logger.Printf("monitor error: %s", err) + } - for initial := true; ; initial = false { - // Enforce a minimum time between synchronization. - if !initial { + // Wait for a change to the WAL iterator. + if r.itr != nil { select { case <-ctx.Done(): return - case <-ticker.C: + case <-r.itr.NotifyCh(): } } - // Wait for changes to the database. + // Wait for the sync interval to collect additional changes. + timer.Reset(r.SyncInterval) select { case <-ctx.Done(): return - case <-notify: + case <-timer.C: } - // Fetch new notify channel before replicating data. - notify = r.db.Notify() - - // Synchronize the shadow wal into the replication directory. - if err := r.Sync(ctx); err != nil { - log.Printf("%s(%s): monitor error: %s", r.db.Path(), r.Name(), err) - continue + // Flush any additional notifications from the WAL iterator. 
+ if r.itr != nil { + select { + case <-r.itr.NotifyCh(): + default: + } } } } @@ -656,7 +698,7 @@ func (r *Replica) retainer(ctx context.Context) { return case <-ticker.C: if err := r.EnforceRetention(ctx); err != nil { - log.Printf("%s(%s): retainer error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("retainer error: %s", err) continue } } @@ -678,171 +720,19 @@ func (r *Replica) snapshotter(ctx context.Context) { return case <-ticker.C: if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration { - log.Printf("%s(%s): snapshotter error: %s", r.db.Path(), r.Name(), err) - continue - } - } - } -} - -// validator runs in a separate goroutine and handles periodic validation. -func (r *Replica) validator(ctx context.Context) { - // Initialize counters since validation occurs infrequently. - for _, status := range []string{"ok", "error"} { - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), status).Add(0) - } - - // Exit validation if interval is not set. - if r.ValidationInterval <= 0 { - return - } - - ticker := time.NewTicker(r.ValidationInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if err := r.Validate(ctx); err != nil { - log.Printf("%s(%s): validation error: %s", r.db.Path(), r.Name(), err) + r.Logger.Printf("snapshotter error: %s", err) continue } } } } -// Validate restores the most recent data from a replica and validates -// that the resulting database matches the current database. -func (r *Replica) Validate(ctx context.Context) error { - db := r.DB() - - // Restore replica to a temporary directory. - tmpdir, err := ioutil.TempDir("", "*-litestream") - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - // Compute checksum of primary database under lock. This prevents a - // sync from occurring and the database will not be written. 
- chksum0, pos, err := db.CRC64(ctx) - if err != nil { - return fmt.Errorf("cannot compute checksum: %w", err) - } - - // Wait until replica catches up to position. - if err := r.waitForReplica(ctx, pos); err != nil { - return fmt.Errorf("cannot wait for replica: %w", err) - } - - restorePath := filepath.Join(tmpdir, "replica") - if err := r.Restore(ctx, RestoreOptions{ - OutputPath: restorePath, - ReplicaName: r.Name(), - Generation: pos.Generation, - Index: pos.Index - 1, - Logger: log.New(os.Stderr, "", 0), - }); err != nil { - return fmt.Errorf("cannot restore: %w", err) - } - - // Open file handle for restored database. - // NOTE: This open is ok as the restored database is not managed by litestream. - f, err := os.Open(restorePath) - if err != nil { - return err - } - defer f.Close() - - // Read entire file into checksum. - h := crc64.New(crc64.MakeTable(crc64.ISO)) - if _, err := io.Copy(h, f); err != nil { - return err - } - chksum1 := h.Sum64() - - status := "ok" - mismatch := chksum0 != chksum1 - if mismatch { - status = "mismatch" - } - log.Printf("%s(%s): validator: status=%s db=%016x replica=%016x pos=%s", db.Path(), r.Name(), status, chksum0, chksum1, pos) - - // Validate checksums match. - if mismatch { - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), "error").Inc() - return ErrChecksumMismatch - } - - replicaValidationTotalCounterVec.WithLabelValues(r.db.Path(), r.Name(), "ok").Inc() - - if err := os.RemoveAll(tmpdir); err != nil { - return fmt.Errorf("cannot remove temporary validation directory: %w", err) - } - return nil -} - -// waitForReplica blocks until replica reaches at least the given position. 
-func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error { - db := r.DB() - - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTicker(10 * time.Second) - defer ticker.Stop() - - once := make(chan struct{}, 1) - once <- struct{}{} - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-timer.C: - return fmt.Errorf("replica wait exceeded timeout") - case <-ticker.C: - case <-once: // immediate on first check - } - - // Obtain current position of replica, check if past target position. - curr := r.Pos() - if curr.IsZero() { - log.Printf("%s(%s): validator: no replica position available", db.Path(), r.Name()) - continue - } - - // Exit if the generation has changed while waiting as there will be - // no further progress on the old generation. - if curr.Generation != pos.Generation { - return fmt.Errorf("generation changed") - } - - ready := true - if curr.Index < pos.Index { - ready = false - } else if curr.Index == pos.Index && curr.Offset < pos.Offset { - ready = false - } - - // If not ready, restart loop. - if !ready { - continue - } - - // Current position at or after target position. - return nil - } -} - // GenerationCreatedAt returns the earliest creation time of any snapshot. // Returns zero time if no snapshots exist. func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (time.Time, error) { var min time.Time - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return min, err } @@ -856,296 +746,10 @@ func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (t return min, itr.Close() } -// GenerationTimeBounds returns the creation time & last updated time of a generation. -// Returns zero time if no snapshots or WAL segments exist. -func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (createdAt, updatedAt time.Time, err error) { - // Iterate over snapshots. 
- sitr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer sitr.Close() - - for sitr.Next() { - info := sitr.Snapshot() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := sitr.Close(); err != nil { - return createdAt, updatedAt, err - } - - // Iterate over WAL segments. - witr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer witr.Close() - - for witr.Next() { - info := witr.WALSegment() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := witr.Close(); err != nil { - return createdAt, updatedAt, err - } - - return createdAt, updatedAt, nil -} - -// CalcRestoreTarget returns a generation to restore from. -func (r *Replica) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (generation string, updatedAt time.Time, err error) { - var target struct { - generation string - updatedAt time.Time - } - - generations, err := r.Client.Generations(ctx) - if err != nil { - return "", time.Time{}, fmt.Errorf("cannot fetch generations: %w", err) - } - - // Search generations for one that contains the requested timestamp. - for _, generation := range generations { - // Skip generation if it does not match filter. - if opt.Generation != "" && generation != opt.Generation { - continue - } - - // Determine the time bounds for the generation. - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) - if err != nil { - return "", time.Time{}, fmt.Errorf("generation created at: %w", err) - } - - // Skip if it does not contain timestamp. 
- if !opt.Timestamp.IsZero() { - if opt.Timestamp.Before(createdAt) || opt.Timestamp.After(updatedAt) { - continue - } - } - - // Use the latest replica if we have multiple candidates. - if !updatedAt.After(target.updatedAt) { - continue - } - - target.generation = generation - target.updatedAt = updatedAt - } - - return target.generation, target.updatedAt, nil -} - -// Replica restores the database from a replica based on the options given. -// This method will restore into opt.OutputPath, if specified, or into the -// DB's original database path. It can optionally restore from a specific -// replica or generation or it will automatically choose the best one. Finally, -// a timestamp can be specified to restore the database to a specific -// point-in-time. -func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) { - // Validate options. - if opt.OutputPath == "" { - return fmt.Errorf("output path required") - } else if opt.Generation == "" && opt.Index != math.MaxInt32 { - return fmt.Errorf("must specify generation when restoring to index") - } else if opt.Index != math.MaxInt32 && !opt.Timestamp.IsZero() { - return fmt.Errorf("cannot specify index & timestamp to restore") - } - - // Ensure logger exists. - logger := opt.Logger - if logger == nil { - logger = log.New(ioutil.Discard, "", 0) - } - - logPrefix := r.Name() - if db := r.DB(); db != nil { - logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name()) - } - - // Ensure output path does not already exist. - if _, err := os.Stat(opt.OutputPath); err == nil { - return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath) - } else if err != nil && !os.IsNotExist(err) { - return err - } - - // Find lastest snapshot that occurs before timestamp or index. 
- var minWALIndex int - if opt.Index < math.MaxInt32 { - if minWALIndex, err = r.SnapshotIndexByIndex(ctx, opt.Generation, opt.Index); err != nil { - return fmt.Errorf("cannot find snapshot index: %w", err) - } - } else { - if minWALIndex, err = r.SnapshotIndexAt(ctx, opt.Generation, opt.Timestamp); err != nil { - return fmt.Errorf("cannot find snapshot index by timestamp: %w", err) - } - } - - // Compute list of offsets for each WAL index. - walSegmentMap, err := r.walSegmentMap(ctx, opt.Generation, opt.Index, opt.Timestamp) - if err != nil { - return fmt.Errorf("cannot find max wal index for restore: %w", err) - } - - // Find the maximum WAL index that occurs before timestamp. - maxWALIndex := -1 - for index := range walSegmentMap { - if index > maxWALIndex { - maxWALIndex = index - } - } - - // Ensure that we found the specific index, if one was specified. - if opt.Index != math.MaxInt32 && opt.Index != opt.Index { - return fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", opt.Index, opt.Generation, maxWALIndex) - } - - // If no WAL files were found, mark this as a snapshot-only restore. - snapshotOnly := maxWALIndex == -1 - - // Initialize starting position. - pos := Pos{Generation: opt.Generation, Index: minWALIndex} - tmpPath := opt.OutputPath + ".tmp" - - // Copy snapshot to output path. - logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath) - if err := r.restoreSnapshot(ctx, pos.Generation, pos.Index, tmpPath); err != nil { - return fmt.Errorf("cannot restore snapshot: %w", err) - } - - // If no WAL files available, move snapshot to final path & exit early. - if snapshotOnly { - logger.Printf("%s: snapshot only, finalizing database", logPrefix) - return os.Rename(tmpPath, opt.OutputPath) - } - - // Begin processing WAL files. 
- logger.Printf("%s: restoring wal files: generation=%s index=[%08x,%08x]", logPrefix, opt.Generation, minWALIndex, maxWALIndex) - - // Fill input channel with all WAL indexes to be loaded in order. - // Verify every index has at least one offset. - ch := make(chan int, maxWALIndex-minWALIndex+1) - for index := minWALIndex; index <= maxWALIndex; index++ { - if len(walSegmentMap[index]) == 0 { - return fmt.Errorf("missing WAL index: %s/%08x", opt.Generation, index) - } - ch <- index - } - close(ch) - - // Track load state for each WAL. - var mu sync.Mutex - cond := sync.NewCond(&mu) - walStates := make([]walRestoreState, maxWALIndex-minWALIndex+1) - - parallelism := opt.Parallelism - if parallelism < 1 { - parallelism = 1 - } - - // Download WAL files to disk in parallel. - g, ctx := errgroup.WithContext(ctx) - for i := 0; i < parallelism; i++ { - g.Go(func() error { - for { - select { - case <-ctx.Done(): - cond.Broadcast() - return err - case index, ok := <-ch: - if !ok { - cond.Broadcast() - return nil - } - - startTime := time.Now() - - err := r.downloadWAL(ctx, opt.Generation, index, walSegmentMap[index], tmpPath) - if err != nil { - err = fmt.Errorf("cannot download wal %s/%08x: %w", opt.Generation, index, err) - } - - // Mark index as ready-to-apply and notify applying code. - mu.Lock() - walStates[index-minWALIndex] = walRestoreState{ready: true, err: err} - mu.Unlock() - cond.Broadcast() - - // Returning the error here will cancel the other goroutines. - if err != nil { - return err - } - - logger.Printf("%s: downloaded wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - } - }) - } - - // Apply WAL files in order as they are ready. - for index := minWALIndex; index <= maxWALIndex; index++ { - // Wait until next WAL file is ready to apply. 
- mu.Lock() - for !walStates[index-minWALIndex].ready { - if err := ctx.Err(); err != nil { - return err - } - cond.Wait() - } - if err := walStates[index-minWALIndex].err; err != nil { - return err - } - mu.Unlock() - - // Apply WAL to database file. - startTime := time.Now() - if err = applyWAL(ctx, index, tmpPath); err != nil { - return fmt.Errorf("cannot apply wal: %w", err) - } - logger.Printf("%s: applied wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - - // Ensure all goroutines finish. All errors should have been handled during - // the processing of WAL files but this ensures that all processing is done. - if err := g.Wait(); err != nil { - return err - } - - // Copy file to final location. - logger.Printf("%s: renaming database from temporary location", logPrefix) - if err := os.Rename(tmpPath, opt.OutputPath); err != nil { - return err - } - - return nil -} - -type walRestoreState struct { - ready bool - err error -} - // SnapshotIndexAt returns the highest index for a snapshot within a generation // that occurs before timestamp. If timestamp is zero, returns the latest snapshot. func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) { - itr, err := r.Client.Snapshots(ctx, generation) + itr, err := r.client.Snapshots(ctx, generation) if err != nil { return 0, err } @@ -1172,137 +776,19 @@ func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timest return snapshotIndex, nil } -// SnapshotIndexbyIndex returns the highest index for a snapshot within a generation -// that occurs before a given index. If index is MaxInt32, returns the latest snapshot. 
-func (r *Replica) SnapshotIndexByIndex(ctx context.Context, generation string, index int) (int, error) { - itr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return 0, err - } - defer itr.Close() - - snapshotIndex := -1 - for itr.Next() { - snapshot := itr.Snapshot() - - if index < math.MaxInt32 && snapshot.Index > index { - continue // after index, skip - } - - // Use snapshot if it newer. - if snapshotIndex == -1 || snapshotIndex >= snapshotIndex { - snapshotIndex = snapshot.Index - } - } - if err := itr.Close(); err != nil { - return 0, err - } else if snapshotIndex == -1 { - return 0, ErrNoSnapshots - } - return snapshotIndex, nil -} - -// walSegmentMap returns a map of WAL indices to their segments. -// Filters by a max timestamp or a max index. -func (r *Replica) walSegmentMap(ctx context.Context, generation string, maxIndex int, maxTimestamp time.Time) (map[int][]int64, error) { - itr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return nil, err - } - defer itr.Close() - - m := make(map[int][]int64) - for itr.Next() { - info := itr.WALSegment() - - // Exit if we go past the max timestamp or index. - if !maxTimestamp.IsZero() && info.CreatedAt.After(maxTimestamp) { - break // after max timestamp, skip - } else if info.Index > maxIndex { - break // after max index, skip - } - - // Verify offsets are added in order. - offsets := m[info.Index] - if len(offsets) == 0 && info.Offset != 0 { - return nil, fmt.Errorf("missing initial wal segment: generation=%s index=%08x offset=%d", generation, info.Index, info.Offset) - } else if len(offsets) > 0 && offsets[len(offsets)-1] >= info.Offset { - return nil, fmt.Errorf("wal segments out of order: generation=%s index=%08x offsets=(%d,%d)", generation, info.Index, offsets[len(offsets)-1], info.Offset) - } - - // Append to the end of the WAL file. 
- m[info.Index] = append(offsets, info.Offset) - } - return m, itr.Close() -} - -// restoreSnapshot copies a snapshot from the replica to a file. -func (r *Replica) restoreSnapshot(ctx context.Context, generation string, index int, filename string) error { - // Determine the user/group & mode based on the DB, if available. - var fileInfo, dirInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo, dirInfo = db.fileInfo, db.dirInfo - } - - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { - return err - } - - f, err := internal.CreateFile(filename, fileInfo) - if err != nil { - return err - } - defer f.Close() - - rd, err := r.Client.SnapshotReader(ctx, generation, index) - if err != nil { - return err - } - defer rd.Close() - - if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } - return f.Close() -} - -// downloadWAL copies a WAL file from the replica to a local copy next to the DB. -// The WAL is later applied by applyWAL(). This function can be run in parallel -// to download multiple WAL files simultaneously. -func (r *Replica) downloadWAL(ctx context.Context, generation string, index int, offsets []int64, dbPath string) (err error) { - // Determine the user/group & mode based on the DB, if available. - var fileInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo = db.fileInfo - } - - // Open readers for every segment in the WAL file, in order. - var readers []io.Reader - for _, offset := range offsets { - rd, err := r.Client.WALSegmentReader(ctx, Pos{Generation: generation, Index: index, Offset: offset}) +// LatestReplica returns the most recently updated replica. 
+func LatestReplica(ctx context.Context, replicas []*Replica) (*Replica, error) { + var t time.Time + var r *Replica + for i := range replicas { + _, max, err := ReplicaClientTimeBounds(ctx, replicas[i].client) if err != nil { - return err + return nil, err + } else if r == nil || max.After(t) { + r, t = replicas[i], max } - defer rd.Close() - readers = append(readers, lz4.NewReader(rd)) - } - - // Open handle to destination WAL path. - f, err := internal.CreateFile(fmt.Sprintf("%s-%08x-wal", dbPath, index), fileInfo) - if err != nil { - return err - } - defer f.Close() - - // Combine segments together and copy WAL to target path. - if _, err := io.Copy(f, io.MultiReader(readers...)); err != nil { - return err - } else if err := f.Close(); err != nil { - return err } - return nil + return r, nil } // Replica metrics. @@ -1334,11 +820,4 @@ var ( Name: "wal_offset", Help: "The current WAL offset", }, []string{"db", "name"}) - - replicaValidationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "litestream", - Subsystem: "replica", - Name: "validation_total", - Help: "The number of validations performed", - }, []string{"db", "name", "status"}) ) diff --git a/replica_client.go b/replica_client.go index 3a914e47..46af8501 100644 --- a/replica_client.go +++ b/replica_client.go @@ -2,9 +2,19 @@ package litestream import ( "context" + "fmt" "io" + "log" + "os" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" ) +// DefaultRestoreParallelism is the default parallelism when downloading WAL files. +const DefaultRestoreParallelism = 8 + // ReplicaClient represents client to connect to a Replica. type ReplicaClient interface { // Returns the type of client. @@ -46,3 +56,464 @@ type ReplicaClient interface { // WAL segment does not exist. 
WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) } + +// FindSnapshotForIndex returns the highest index for a snapshot within a +// generation that occurs before a given index. +func FindSnapshotForIndex(ctx context.Context, client ReplicaClient, generation string, index int) (int, error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer itr.Close() + + // Iterate over all snapshots to find the closest to our given index. + snapshotIndex := -1 + var n int + for ; itr.Next(); n++ { + info := itr.Snapshot() + if info.Index > index { + continue // after given index, skip + } + + // Use snapshot if it's more recent. + if info.Index >= snapshotIndex { + snapshotIndex = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Ensure we find at least one snapshot and that it's before the given index. + if n == 0 { + return 0, ErrNoSnapshots + } else if snapshotIndex == -1 { + return 0, fmt.Errorf("no snapshots available at or before index %s", FormatIndex(index)) + } + return snapshotIndex, nil +} + +// GenerationTimeBounds returns the creation time & last updated time of a generation. +// Returns ErrNoSnapshots if no data exists for the generation. +func GenerationTimeBounds(ctx context.Context, client ReplicaClient, generation string) (createdAt, updatedAt time.Time, err error) { + // Determine bounds for snapshots only first. + // This will return ErrNoSnapshots if no snapshots exist. + if createdAt, updatedAt, err = SnapshotTimeBounds(ctx, client, generation); err != nil { + return createdAt, updatedAt, err + } + + // Update ending time bounds if WAL segments exist after the last snapshot. 
+ _, max, err := WALTimeBounds(ctx, client, generation)
+ if err != nil && err != ErrNoWALSegments {
+ return createdAt, updatedAt, err
+ } else if max.After(updatedAt) {
+ updatedAt = max
+ }
+
+ return createdAt, updatedAt, nil
+}
+
+// SnapshotTimeBounds returns the minimum and maximum snapshot timestamps within a generation.
+// Returns ErrNoSnapshots if no data exists for the generation.
+func SnapshotTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) {
+ itr, err := client.Snapshots(ctx, generation)
+ if err != nil {
+ return min, max, fmt.Errorf("snapshots: %w", err)
+ }
+ defer itr.Close()
+
+ // Iterate over all snapshots to find the oldest and newest.
+ var n int
+ for ; itr.Next(); n++ {
+ info := itr.Snapshot()
+ if min.IsZero() || info.CreatedAt.Before(min) {
+ min = info.CreatedAt
+ }
+ if max.IsZero() || info.CreatedAt.After(max) {
+ max = info.CreatedAt
+ }
+ }
+ if err := itr.Close(); err != nil {
+ return min, max, fmt.Errorf("snapshot iteration: %w", err)
+ }
+
+ // Return error if no snapshots exist.
+ if n == 0 {
+ return min, max, ErrNoSnapshots
+ }
+ return min, max, nil
+}
+
+// WALTimeBounds returns the minimum and maximum WAL segment timestamps within a generation.
+// Returns ErrNoWALSegments if no data exists for the generation.
+func WALTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) {
+ itr, err := client.WALSegments(ctx, generation)
+ if err != nil {
+ return min, max, fmt.Errorf("wal segments: %w", err)
+ }
+ defer itr.Close()
+
+ // Iterate over all WAL segments to find oldest and newest. 
+ var n int + for ; itr.Next(); n++ { + info := itr.WALSegment() + if min.IsZero() || info.CreatedAt.Before(min) { + min = info.CreatedAt + } + if max.IsZero() || info.CreatedAt.After(max) { + max = info.CreatedAt + } + } + if err := itr.Close(); err != nil { + return min, max, fmt.Errorf("wal segment iteration: %w", err) + } + + if n == 0 { + return min, max, ErrNoWALSegments + } + return min, max, nil +} + +// FindLatestGeneration returns the most recent generation for a client. +func FindLatestGeneration(ctx context.Context, client ReplicaClient) (generation string, err error) { + generations, err := client.Generations(ctx) + if err != nil { + return "", fmt.Errorf("generations: %w", err) + } + + // Search generations for one latest updated. + var maxTime time.Time + for i := range generations { + // Determine the latest update for the generation. + _, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i]) + if err != nil { + return "", fmt.Errorf("generation time bounds: %w", err) + } + + // Use the latest replica if we have multiple candidates. + if updatedAt.After(maxTime) { + maxTime = updatedAt + generation = generations[i] + } + } + + if generation == "" { + return "", ErrNoGeneration + } + return generation, nil +} + +// ReplicaClientTimeBounds returns time range covered by a replica client +// across all generations. It scans the time range of all generations and +// computes the lower and upper bounds of them. +func ReplicaClientTimeBounds(ctx context.Context, client ReplicaClient) (min, max time.Time, err error) { + generations, err := client.Generations(ctx) + if err != nil { + return min, max, fmt.Errorf("generations: %w", err) + } else if len(generations) == 0 { + return min, max, ErrNoGeneration + } + + // Iterate over generations to determine outer bounds. + for i := range generations { + // Determine the time range for the generation. 
+ createdAt, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i]) + if err != nil { + return min, max, fmt.Errorf("generation time bounds: %w", err) + } + + // Update time bounds. + if min.IsZero() || createdAt.Before(min) { + min = createdAt + } + if max.IsZero() || updatedAt.After(max) { + max = updatedAt + } + } + + return min, max, nil +} + +// FindIndexByTimestamp returns the highest index before a given point-in-time +// within a generation. Returns ErrNoSnapshots if no index exists on the replica +// for the generation. +func FindIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + snapshotIndex, err := FindSnapshotIndexByTimestamp(ctx, client, generation, timestamp) + if err == ErrNoSnapshots { + return 0, err + } else if err != nil { + return 0, fmt.Errorf("max snapshot index: %w", err) + } + + // Determine the highest available WAL index. + walIndex, err := FindWALIndexByTimestamp(ctx, client, generation, timestamp) + if err != nil && err != ErrNoWALSegments { + return 0, fmt.Errorf("max wal index: %w", err) + } + + // Use snapshot index if it's after the last WAL index. + if snapshotIndex > walIndex { + return snapshotIndex, nil + } + return walIndex, nil +} + +// FindSnapshotIndexByTimestamp returns the highest snapshot index before timestamp. +// Returns ErrNoSnapshots if no snapshots exist for the generation on the replica. +func FindSnapshotIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over snapshots to find the highest index. 
+ var n int + for ; itr.Next(); n++ { + if info := itr.Snapshot(); info.CreatedAt.After(timestamp) { + continue + } else if info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Return an error if no snapshots were found. + if n == 0 { + return 0, ErrNoSnapshots + } + return index, nil +} + +// FindWALIndexByTimestamp returns the highest WAL index before timestamp. +// Returns ErrNoWALSegments if no segments exist for the generation on the replica. +func FindWALIndexByTimestamp(ctx context.Context, client ReplicaClient, generation string, timestamp time.Time) (index int, err error) { + itr, err := client.WALSegments(ctx, generation) + if err != nil { + return 0, fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over WAL segments to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.WALSegment(); info.CreatedAt.After(timestamp) { + continue + } else if info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("wal segment iteration: %w", err) + } + + // Return an error if no WAL segments were found. + if n == 0 { + return 0, ErrNoWALSegments + } + return index, nil +} + +// FindMaxIndexByGeneration returns the last index within a generation. +// Returns ErrNoSnapshots if no index exists on the replica for the generation. +func FindMaxIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + // Determine the highest available snapshot index. Returns an error if no + // snapshot are available as WALs are not useful without snapshots. + snapshotIndex, err := FindMaxSnapshotIndexByGeneration(ctx, client, generation) + if err == ErrNoSnapshots { + return index, err + } else if err != nil { + return index, fmt.Errorf("max snapshot index: %w", err) + } + + // Determine the highest available WAL index. 
+ walIndex, err := FindMaxWALIndexByGeneration(ctx, client, generation) + if err != nil && err != ErrNoWALSegments { + return index, fmt.Errorf("max wal index: %w", err) + } + + // Use snapshot index if it's after the last WAL index. + if snapshotIndex > walIndex { + return snapshotIndex, nil + } + return walIndex, nil +} + +// FindMaxSnapshotIndexByGeneration returns the last snapshot index within a generation. +// Returns ErrNoSnapshots if no snapshots exist for the generation on the replica. +func FindMaxSnapshotIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over snapshots to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.Snapshot(); info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Return an error if no snapshots were found. + if n == 0 { + return 0, ErrNoSnapshots + } + return index, nil +} + +// FindMaxWALIndexByGeneration returns the last WAL index within a generation. +// Returns ErrNoWALSegments if no segments exist for the generation on the replica. +func FindMaxWALIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + itr, err := client.WALSegments(ctx, generation) + if err != nil { + return 0, fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over WAL segments to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.WALSegment(); info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("wal segment iteration: %w", err) + } + + // Return an error if no WAL segments were found. 
+ if n == 0 { + return 0, ErrNoWALSegments + } + return index, nil +} + +// Restore restores the database to the given index on a generation. +func Restore(ctx context.Context, client ReplicaClient, filename, generation string, snapshotIndex, targetIndex int, opt RestoreOptions) (err error) { + // Validate options. + if filename == "" { + return fmt.Errorf("restore path required") + } else if generation == "" { + return fmt.Errorf("generation required") + } else if snapshotIndex < 0 { + return fmt.Errorf("snapshot index required") + } else if targetIndex < 0 { + return fmt.Errorf("target index required") + } + + // Require a default level of parallelism. + if opt.Parallelism < 1 { + opt.Parallelism = DefaultRestoreParallelism + } + + // Ensure logger exists. + logger := opt.Logger + if logger == nil { + logger = log.New(io.Discard, "", 0) + } + + // Ensure output path does not already exist. + // If doesn't exist, also remove the journal, shm, & wal if left behind. + if _, err := os.Stat(filename); err == nil { + return fmt.Errorf("cannot restore, output path already exists: %s", filename) + } else if err != nil && !os.IsNotExist(err) { + return err + } else if err := removeDBFiles(filename); err != nil { + return err + } + + // Copy snapshot to output path. + tmpPath := filename + ".tmp" + logger.Printf("%srestoring snapshot %s/%s to %s", opt.LogPrefix, generation, FormatIndex(snapshotIndex), tmpPath) + if err := RestoreSnapshot(ctx, client, tmpPath, generation, snapshotIndex, opt.Mode, opt.Uid, opt.Gid); err != nil { + return fmt.Errorf("cannot restore snapshot: %w", err) + } + + // Download & apply all WAL files between the snapshot & the target index. + d := NewWALDownloader(client, tmpPath, generation, snapshotIndex, targetIndex) + d.Parallelism = opt.Parallelism + d.Mode = opt.Mode + d.Uid, d.Gid = opt.Uid, opt.Gid + + for { + // Read next WAL file from downloader. 
+ walIndex, walPath, err := d.Next(ctx) + if err == io.EOF { + break + } + + // If we are only reading a single index, a WAL file may not be found. + if _, ok := err.(*WALNotFoundError); ok && snapshotIndex == targetIndex { + logger.Printf("%sno wal files found, snapshot only", opt.LogPrefix) + break + } else if err != nil { + return fmt.Errorf("cannot download WAL: %w", err) + } + + // Apply WAL file. + startTime := time.Now() + if err = ApplyWAL(ctx, tmpPath, walPath); err != nil { + return fmt.Errorf("cannot apply wal: %w", err) + } + logger.Printf("%sapplied wal %s/%s elapsed=%s", opt.LogPrefix, generation, FormatIndex(walIndex), time.Since(startTime).String()) + } + + // Copy file to final location. + logger.Printf("%srenaming database from temporary location", opt.LogPrefix) + if err := os.Rename(tmpPath, filename); err != nil { + return err + } + + return nil +} + +// RestoreOptions represents options for DB.Restore(). +type RestoreOptions struct { + // File info used for restored snapshot & WAL files. + Mode os.FileMode + Uid, Gid int + + // Specifies how many WAL files are downloaded in parallel during restore. + Parallelism int + + // Logging settings. + Logger *log.Logger + LogPrefix string +} + +// NewRestoreOptions returns a new instance of RestoreOptions with defaults. +func NewRestoreOptions() RestoreOptions { + return RestoreOptions{ + Mode: 0600, + Parallelism: DefaultRestoreParallelism, + } +} + +// RestoreSnapshot copies a snapshot from the replica client to a file. 
+func RestoreSnapshot(ctx context.Context, client ReplicaClient, filename, generation string, index int, mode os.FileMode, uid, gid int) error { + f, err := internal.CreateFile(filename, mode, uid, gid) + if err != nil { + return err + } + defer f.Close() + + rd, err := client.SnapshotReader(ctx, generation, index) + if err != nil { + return err + } + defer rd.Close() + + if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { + return err + } else if err := f.Sync(); err != nil { + return err + } + return f.Close() +} diff --git a/replica_client_test.go b/replica_client_test.go index 69f9746a..37f92d2d 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -2,572 +2,741 @@ package litestream_test import ( "context" - "flag" "fmt" - "io/ioutil" - "math/rand" "os" - "path" - "reflect" - "sort" + "path/filepath" "strings" "testing" "time" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" - "github.com/benbjohnson/litestream/s3" - "github.com/benbjohnson/litestream/sftp" + "github.com/benbjohnson/litestream/mock" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} +func TestFindSnapshotForIndex(t *testing.T) { + t.Run("BeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000007d0); err != nil { + t.Fatal(err) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%s, want %s", litestream.FormatIndex(got), litestream.FormatIndex(want)) + } + }) -var ( - // Enables integration tests. 
- integration = flag.String("integration", "file", "") -) + t.Run("AtIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8); err != nil { + t.Fatal(err) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%s, want %s", litestream.FormatIndex(got), litestream.FormatIndex(want)) + } + }) -// S3 settings -var ( - // Replica client settings - s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") - s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") - s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") - s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") - s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") - s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") - s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") - s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") -) + t.Run("ErrNoSnapshotsBeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots-before-index")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `no snapshots available at or before index 00000000000003e8` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// Google cloud storage settings -var ( - gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") - gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "") -) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := 
litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) + } + }) -// Azure blob storage settings -var ( - absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") - absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") - absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") - absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") -) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// SFTP settings -var ( - sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") - sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") - sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") - sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") - sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") -) + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } -func TestReplicaClient_Generations(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation 
string) (litestream.SnapshotIterator, error) { + return &itr, nil + } - // Write snapshots. - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { - t.Fatal(err) + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %#v", err) } + }) +} - // Verify returned generations. - if got, err := c.Generations(context.Background()); err != nil { +func TestSnapshotTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "ok")) + if min, max, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { - t.Fatalf("Generations()=%v, want %v", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "no-snapshots")) + if _, _, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) } }) - RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c 
litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } - if generations, err := c.Generations(context.Background()); err != nil { - t.Fatal(err) - } else if got, want := len(generations), 0; got != want { - t.Fatalf("len(Generations())=%v, want %v", got, want) + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -func TestReplicaClient_Snapshots(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - // Write snapshots. - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil } - // Fetch all snapshots by generation. 
- itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } - defer itr.Close() + }) +} - // Read all snapshots into a slice so they can be sorted. - a, err := litestream.SliceSnapshotIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 2; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.SnapshotInfoSlice(a)) - - // Verify first snapshot metadata. - if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 5; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(1); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify second snapshot metadata. - if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 0xA; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. 
- if err := itr.Close(); err != nil { +func TestWALTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "ok")) + if min, max, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "no-wal-segments")) + if _, _, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %#v", err) + } + }) - itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - defer itr.Close() - if itr.Next() { - t.Fatal("expected no snapshots") + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegmentIterator", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - itr, err := 
c.Snapshots(context.Background(), "") - if err == nil { - err = itr.Close() + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil } - if err == nil || err.Error() != `cannot determine snapshots path: generation required` { - t.Fatalf("unexpected error: %v", err) + + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WriteSnapshot(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { +func TestGenerationTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "ok")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } + }) - if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { - t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Run("SnapshotsOnly", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "snapshots-only")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if err := r.Close(); err != nil { - t.Fatal(err) - } else if got, want := 
string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "no-snapshots")) + if _, _, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) } }) -} -func TestReplicaClient_SnapshotReader(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegments", func(t *testing.T) { + var snapshotN int + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { + snapshotN++ + return snapshotN == 1 + } + itr.SnapshotFunc = func() litestream.SnapshotInfo { + return litestream.SnapshotInfo{CreatedAt: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)} + } + itr.CloseFunc = func() error { return nil } - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - r, err := 
c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) - if err != nil { - t.Fatal(err) + _, _, err := litestream.GenerationTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } - defer r.Close() + }) +} - if buf, err := ioutil.ReadAll(r); err != nil { +func TestFindLatestGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != nil { t.Fatal(err) - } else if got, want := string(buf), "foo"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + } else if got, want := generation, "0000000000000001"; got != want { + t.Fatalf("generation=%s, want %s", got, want) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "no-generations")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) + } else if got, want := generation, ""; got != want { + t.Fatalf("generation=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") + } - if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, 
"ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil + } + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } - if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` { - t.Fatalf("unexpected error: %v", err) + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WALs(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { +func TestReplicaClientTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if min, max, err := litestream.ReplicaClientTimeBounds(context.Background(), client); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, 
Offset: 5}, strings.NewReader(`67`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) + }) + + t.Run("ErrNoGeneration", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, nil } - itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) } - defer itr.Close() + }) - // Read all WAL segment files into a slice so they can be sorted. - a, err := litestream.SliceWALSegmentIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 3; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.WALSegmentInfoSlice(a)) - - // Verify first WAL segment metadata. - if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(5); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify first WAL segment metadata. 
- if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Offset, int64(5); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(2); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify third WAL segment metadata. - if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[2].Index, 3; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[2].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[2].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. 
- if err := itr.Close(); err != nil { - t.Fatal(err) + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil + } + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { +func TestFindMaxSnapshotIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "ok")) + if index, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 0x000007d0; got != want { + t.Fatalf("index=%d, want %d", got, want) } - defer itr.Close() + }) - if itr.Next() { - t.Fatal("expected no wal files") + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "no-snapshots")) + + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000") + if err != 
litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshotIteration", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil } - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + +func TestFindMaxWALIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "ok")) + if index, err := litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "no-wal")) + + _, err := 
litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - defer itr.Close() - if itr.Next() { - t.Fatal("expected no wal files") + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrWALSegmentIteration", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } - itr, err := c.WALSegments(context.Background(), "") - if err == nil { - err = itr.Close() + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil } - if err == nil || err.Error() != `cannot determine wal path: generation required` { - t.Fatalf("unexpected error: %v", err) + + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WriteWALSegment(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { +func TestFindMaxIndexByGeneration(t *testing.T) { + t.Run("OK", func(t 
*testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "ok")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := index, 0x00000002; got != want { + t.Fatalf("index=%d, want %d", got, want) } + }) - if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { - t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Run("NoWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if err := r.Close(); err != nil { + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("SnapshotLaterThanWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "snapshot-later-than-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if got, want := string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-snapshots")) + + _, err := 
litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) } }) -} -func TestReplicaClient_WALReader(t *testing.T) { + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { - t.Fatal(err) + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max snapshot index: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } + }) - r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) - if err != nil { - t.Fatal(err) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return litestream.NewSnapshotInfoSliceIterator([]litestream.SnapshotInfo{{Index: 0x00000001}}), nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") } - defer r.Close() - if buf, err := ioutil.ReadAll(r); err != nil { + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max wal index: wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + +func TestFindSnapshotIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := 
litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-index-by-timestamp", "ok")) + if index, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) - } else if got, want := string(buf), "foobar"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + } else if got, want := index, 0x000007d0; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-index-by-timestamp", "no-snapshots")) - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshotIteration", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) 
(litestream.SnapshotIterator, error) { + return &itr, nil + } + + _, err := litestream.FindSnapshotIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_DeleteWALSegments(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() +func TestFindWALIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-index-by-timestamp", "ok")) + if index, err := litestream.FindWALIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-index-by-timestamp", "no-wal")) + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + 
t.Run("ErrWALSegmentIteration", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil + } + + _, err := litestream.FindWALIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) +} + +func TestFindIndexByTimestamp(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "ok")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 4, 0, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { + } else if got, want := index, 0x00000002; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("NoWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "no-wal")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) } + }) - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ - {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, - {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, - }); err != nil { + t.Run("SnapshotLaterThanWAL", func(t *testing.T) { + client := 
litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "snapshot-later-than-wal")) + if index, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "index-by-timestamp", "no-snapshots")) - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) - } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindIndexByTimestamp(context.Background(), client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `max snapshot index: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) -} -// RunWithReplicaClient 
executes fn with each replica specified by the -integration flag -func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { - t.Run(name, func(t *testing.T) { - for _, typ := range strings.Split(*integration, ",") { - t.Run(typ, func(t *testing.T) { - c := NewReplicaClient(t, typ) - defer MustDeleteAll(t, c) + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return litestream.NewSnapshotInfoSliceIterator([]litestream.SnapshotInfo{{Index: 0x00000001}}), nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } - fn(t, c) - }) + _, err := litestream.FindIndexByTimestamp(context.Background(), &client, "0000000000000000", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + if err == nil || err.Error() != `max wal index: wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -// NewReplicaClient returns a new client for integration testing by type name. -func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { - tb.Helper() - - switch typ { - case file.ReplicaClientType: - return NewFileReplicaClient(tb) - case s3.ReplicaClientType: - return NewS3ReplicaClient(tb) - case gcs.ReplicaClientType: - return NewGCSReplicaClient(tb) - case abs.ReplicaClientType: - return NewABSReplicaClient(tb) - case sftp.ReplicaClientType: - return NewSFTPReplicaClient(tb) - default: - tb.Fatalf("invalid replica client type: %q", typ) - return nil - } -} +func TestRestore(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() -// NewFileReplicaClient returns a new client for integration testing. 
-func NewFileReplicaClient(tb testing.TB) *file.ReplicaClient { - tb.Helper() - return file.NewReplicaClient(tb.TempDir()) -} + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewS3ReplicaClient returns a new client for integration testing. -func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { - tb.Helper() - - c := s3.NewReplicaClient() - c.AccessKeyID = *s3AccessKeyID - c.SecretAccessKey = *s3SecretAccessKey - c.Region = *s3Region - c.Bucket = *s3Bucket - c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) - c.Endpoint = *s3Endpoint - c.ForcePathStyle = *s3ForcePathStyle - c.SkipVerify = *s3SkipVerify - return c -} + t.Run("SnapshotOnly", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "snapshot-only") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewGCSReplicaClient returns a new client for integration testing. 
-func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { - tb.Helper() + t.Run("DefaultParallelism", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() - c := gcs.NewReplicaClient() - c.Bucket = *gcsBucket - c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + client := litestream.NewFileReplicaClient(testDir) + opt := litestream.NewRestoreOptions() + opt.Parallelism = 0 + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, opt); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) -// NewABSReplicaClient returns a new client for integration testing. -func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { - tb.Helper() + t.Run("ErrPathRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, "", "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `restore path required` { + t.Fatalf("unexpected error: %#v", err) + } + }) - c := abs.NewReplicaClient() - c.AccountName = *absAccountName - c.AccountKey = *absAccountKey - c.Bucket = *absBucket - c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + t.Run("ErrGenerationRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// NewSFTPReplicaClient returns a new client for integration testing. 
-func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { - tb.Helper() - - c := sftp.NewReplicaClient() - c.Host = *sftpHost - c.User = *sftpUser - c.Password = *sftpPassword - c.KeyPath = *sftpKeyPath - c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} + t.Run("ErrSnapshotIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", -1, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `snapshot index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) -// MustDeleteAll deletes all objects under the client's path. -func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { - tb.Helper() + t.Run("ErrTargetIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", 0, -1, litestream.NewRestoreOptions()); err == nil || err.Error() != `target index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) - generations, err := c.Generations(context.Background()) - if err != nil { - tb.Fatalf("cannot list generations for deletion: %s", err) - } + t.Run("ErrPathExists", func(t *testing.T) { + filename := filepath.Join(t.TempDir(), "db") + if err := os.WriteFile(filename, []byte("foo"), 0600); err != nil { + t.Fatal(err) + } + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, filename, "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `cannot restore, output path already exists`) { + t.Fatalf("unexpected error: %#v", err) + } + }) - for _, generation := range generations { - if err := c.DeleteGeneration(context.Background(), generation); err != nil { - tb.Fatalf("cannot delete generation: %s", err) + t.Run("ErrPathPermissions", func(t *testing.T) { + dir := t.TempDir() + if err := os.Chmod(dir, 0000); 
err != nil { + t.Fatal(err) } - } - switch c := c.(type) { - case *sftp.ReplicaClient: - if err := c.Cleanup(context.Background()); err != nil { - tb.Fatalf("cannot cleanup sftp: %s", err) + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "restore", "bad-permissions")) + if err := litestream.Restore(context.Background(), client, filepath.Join(dir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatalf("unexpected error: %#v", err) } - } + }) } diff --git a/replica_test.go b/replica_test.go index 7f42c08a..97455acc 100644 --- a/replica_test.go +++ b/replica_test.go @@ -8,20 +8,18 @@ import ( "testing" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/mock" "github.com/pierrec/lz4/v4" ) func TestReplica_Name(t *testing.T) { t.Run("WithName", func(t *testing.T) { - if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want { + if got, want := litestream.NewReplica(nil, "NAME", nil).Name(), "NAME"; got != want { t.Fatalf("Name()=%v, want %v", got, want) } }) t.Run("WithoutName", func(t *testing.T) { - r := litestream.NewReplica(nil, "") - r.Client = &mock.ReplicaClient{} + r := litestream.NewReplica(nil, "", &mock.ReplicaClient{}) if got, want := r.Name(), "mock"; got != want { t.Fatalf("Name()=%v, want %v", got, want) } @@ -43,14 +41,10 @@ func TestReplica_Sync(t *testing.T) { } // Fetch current database position. - dpos, err := db.Pos() - if err != nil { - t.Fatal(err) - } + dpos := db.Pos() - c := file.NewReplicaClient(t.TempDir()) - r := litestream.NewReplica(db, "") - c.Replica, r.Client = r, c + c := litestream.NewFileReplicaClient(t.TempDir()) + r := litestream.NewReplica(db, "", c) if err := r.Sync(context.Background()); err != nil { t.Fatal(err) @@ -69,11 +63,11 @@ func TestReplica_Sync(t *testing.T) { // Verify WAL matches replica WAL. 
if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil { t.Fatal(err) - } else if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil { + } else if r0, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil { t.Fatal(err) - } else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil { + } else if b1, err := io.ReadAll(lz4.NewReader(r0)); err != nil { t.Fatal(err) - } else if err := r.Close(); err != nil { + } else if err := r0.Close(); err != nil { t.Fatal(err) } else if !bytes.Equal(b0, b1) { t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1)) @@ -84,9 +78,8 @@ func TestReplica_Snapshot(t *testing.T) { db, sqldb := MustOpenDBs(t) defer MustCloseDBs(t, db, sqldb) - c := file.NewReplicaClient(t.TempDir()) - r := litestream.NewReplica(db, "") - r.Client = c + c := litestream.NewFileReplicaClient(t.TempDir()) + r := litestream.NewReplica(db, "", c) // Execute a query to force a write to the WAL. if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { @@ -98,10 +91,8 @@ func TestReplica_Snapshot(t *testing.T) { } // Fetch current database position & snapshot. - pos0, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if info, err := r.Snapshot(context.Background()); err != nil { + pos0 := db.Pos() + if info, err := r.Snapshot(context.Background()); err != nil { t.Fatal(err) } else if got, want := info.Pos(), pos0.Truncate(); got != want { t.Fatalf("pos=%s, want %s", got, want) @@ -122,10 +113,8 @@ func TestReplica_Snapshot(t *testing.T) { } // Fetch current database position & snapshot. 
- pos1, err := db.Pos() - if err != nil { - t.Fatal(err) - } else if info, err := r.Snapshot(context.Background()); err != nil { + pos1 := db.Pos() + if info, err := r.Snapshot(context.Background()); err != nil { t.Fatal(err) } else if got, want := info.Pos(), pos1.Truncate(); got != want { t.Fatalf("pos=%v, want %v", got, want) diff --git a/s3/replica_client.go b/s3/replica_client.go index b68628a1..79c7e67c 100644 --- a/s3/replica_client.go +++ b/s3/replica_client.go @@ -10,13 +10,14 @@ import ( "os" "path" "regexp" + "strconv" + "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -93,6 +94,7 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) { if region != "" { config.Region = aws.String(region) } + sess, err := session.NewSession(config) if err != nil { return fmt.Errorf("cannot create aws session: %w", err) @@ -105,7 +107,8 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) { // config returns the AWS configuration. Uses the default credential chain // unless a key/secret are explicitly set. 
func (c *ReplicaClient) config() *aws.Config { - config := defaults.Get().Config + config := &aws.Config{} + if c.AccessKeyID != "" || c.SecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "") } @@ -154,7 +157,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { var generations []string if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ Bucket: aws.String(c.Bucket), - Prefix: aws.String(litestream.GenerationsPath(c.Path) + "/"), + Prefix: aws.String(path.Join(c.Path, "generations") + "/"), Delimiter: aws.String("/"), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() @@ -178,18 +181,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { if err := c.Init(ctx); err != nil { return err - } - - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) + } else if generation == "" { + return fmt.Errorf("generation required") } // Collect all files for the generation. 
var objIDs []*s3.ObjectIdentifier if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ Bucket: aws.String(c.Bucket), - Prefix: aws.String(dir), + Prefix: aws.String(path.Join(c.Path, "generations", generation)), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() @@ -208,10 +208,14 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) n = len(objIDs) } - if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ + out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(c.Bucket), Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, - }); err != nil { + }) + if err != nil { + return err + } + if err := deleteOutputError(out); err != nil { return err } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() @@ -236,12 +240,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -256,8 +259,6 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N())) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", 
r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, @@ -270,12 +271,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(c.Bucket), @@ -296,17 +296,20 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { if err := c.Init(ctx); err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - key, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") - if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ + out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(c.Bucket), Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)}, - }); err != nil { + }) + if err != nil { + return err + } + if err := deleteOutputError(out); err != nil { return err } @@ -326,12 +329,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit func (c *ReplicaClient) WriteWALSegment(ctx 
context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { if err := c.Init(ctx); err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() rc := internal.NewReadCounter(rd) @@ -360,12 +362,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { if err := c.Init(ctx); err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(c.Bucket), @@ -397,21 +398,24 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po // Generate a batch of object IDs for deleting the WAL segments. 
for i, pos := range a[:n] { - key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") objIDs[i] = &s3.ObjectIdentifier{Key: &key} } // Delete S3 objects in bulk. - if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ + out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(c.Bucket), Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, - }); err != nil { + }) + if err != nil { + return err + } + if err := deleteOutputError(out); err != nil { return err } - internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() a = a[n:] @@ -454,10 +458,14 @@ func (c *ReplicaClient) DeleteAll(ctx context.Context) error { n = len(objIDs) } - if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ + out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(c.Bucket), Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, - }); err != nil { + }) + if err != nil { + return err + } + if err := deleteOutputError(out); err != nil { return err } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() @@ -498,11 +506,12 @@ func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation func (itr *snapshotIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine snapshots path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + dir := path.Join(itr.client.Path, "generations", itr.generation, "snapshots") + return 
itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ Bucket: aws.String(itr.client.Bucket), Prefix: aws.String(dir + "/"), @@ -511,8 +520,7 @@ func (itr *snapshotIterator) fetch() error { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() for _, obj := range page.Contents { - key := path.Base(*obj.Key) - index, err := litestream.ParseSnapshotPath(key) + index, err := internal.ParseSnapshotPath(path.Base(*obj.Key)) if err != nil { continue } @@ -601,21 +609,20 @@ func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generatio func (itr *walSegmentIterator) fetch() error { defer close(itr.ch) - dir, err := litestream.WALPath(itr.client.Path, itr.generation) - if err != nil { - return fmt.Errorf("cannot determine wal path: %w", err) + if itr.generation == "" { + return fmt.Errorf("generation required") } + prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal") + "/" + return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ - Bucket: aws.String(itr.client.Bucket), - Prefix: aws.String(dir + "/"), - Delimiter: aws.String("/"), + Bucket: aws.String(itr.client.Bucket), + Prefix: aws.String(prefix), }, func(page *s3.ListObjectsOutput, lastPage bool) bool { internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() for _, obj := range page.Contents { - key := path.Base(*obj.Key) - index, offset, err := litestream.ParseWALSegmentPath(key) + index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(*obj.Key, prefix)) if err != nil { continue } @@ -713,6 +720,27 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) endpoint = net.JoinHostPort(endpoint, port) } + if s := os.Getenv("LITESTREAM_SCHEME"); s != "" { + if s != "https" && s != "http" { + panic(fmt.Sprintf("Unsupported LITESTREAM_SCHEME value: %q", s)) + } else { + scheme = s + } + } + if e := os.Getenv("LITESTREAM_ENDPOINT"); e != "" { + 
endpoint = e + } + if r := os.Getenv("LITESTREAM_REGION"); r != "" { + region = r + } + if s := os.Getenv("LITESTREAM_FORCE_PATH_STYLE"); s != "" { + if b, err := strconv.ParseBool(s); err != nil { + panic(fmt.Sprintf("Invalid LITESTREAM_FORCE_PATH_STYLE value: %q", s)) + } else { + forcePathStyle = b + } + } + // Prepend scheme to endpoint. if endpoint != "" { endpoint = scheme + "://" + endpoint @@ -723,10 +751,10 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) var ( localhostRegex = regexp.MustCompile(`^(?:(.+)\.)?localhost$`) - backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`) - filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.filebase.com$`) - digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`) - linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`) + backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.([^.]+)\.backblazeb2\.com$`) + filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.filebase\.com$`) + digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces\.com$`) + linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects\.com$`) ) func isNotExists(err error) bool { @@ -737,3 +765,15 @@ func isNotExists(err error) bool { return false } } + +func deleteOutputError(out *s3.DeleteObjectsOutput) error { + switch len(out.Errors) { + case 0: + return nil + case 1: + return fmt.Errorf("deleting object %s: %s - %s", *out.Errors[0].Key, *out.Errors[0].Code, *out.Errors[0].Message) + default: + return fmt.Errorf("%d errors occured deleting objects, %s: %s - (%s (and %d others)", + len(out.Errors), *out.Errors[0].Key, *out.Errors[0].Code, *out.Errors[0].Message, len(out.Errors)-1) + } +} diff --git a/server.go b/server.go new file mode 100644 index 00000000..e487b55b --- /dev/null +++ b/server.go @@ -0,0 +1,199 @@ +package litestream + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "sync" + + 
"github.com/fsnotify/fsnotify" + "golang.org/x/sync/errgroup" +) + +// Server represents the top-level container. +// It manage databases and routes global file system events. +type Server struct { + mu sync.Mutex + dbs map[string]*DB // databases by path + watcher *fsnotify.Watcher + + ctx context.Context + cancel func() + errgroup errgroup.Group +} + +// NewServer returns a new instance of Server. +func NewServer() *Server { + return &Server{ + dbs: make(map[string]*DB), + } +} + +// Open initializes the server and begins watching for file system events. +func (s *Server) Open() error { + var err error + s.watcher, err = fsnotify.NewWatcher() + if err != nil { + return err + } + + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.errgroup.Go(func() error { + if err := s.monitor(s.ctx); err != nil && err != context.Canceled { + return fmt.Errorf("server monitor error: %w", err) + } + return nil + }) + return nil +} + +// Close shuts down the server and all databases it manages. +func (s *Server) Close() (err error) { + // Cancel context and wait for goroutines to finish. + s.cancel() + if e := s.errgroup.Wait(); e != nil && err == nil { + err = e + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.watcher != nil { + if e := s.watcher.Close(); e != nil && err == nil { + err = fmt.Errorf("close watcher: %w", e) + } + } + + for _, db := range s.dbs { + if e := db.Close(); e != nil && err == nil { + err = fmt.Errorf("close db: path=%s err=%w", db.Path(), e) + } + } + s.dbs = make(map[string]*DB) + + return err +} + +// DB returns the database with the given path, if it's managed by the server. +func (s *Server) DB(path string) *DB { + s.mu.Lock() + defer s.mu.Unlock() + return s.dbs[path] +} + +// DBs returns a slice of all databases managed by the server. 
+func (s *Server) DBs() []*DB { + s.mu.Lock() + defer s.mu.Unlock() + + a := make([]*DB, 0, len(s.dbs)) + for _, db := range s.dbs { + a = append(a, db) + } + return a +} + +// Watch adds a database path to be managed by the server. +func (s *Server) Watch(path string, fn func(path string) (*DB, error)) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Instantiate DB from factory function. + db, err := fn(path) + if err != nil { + return fmt.Errorf("new database: %w", err) + } + + // Start watching the database for changes. + if err := db.Open(); err != nil { + return fmt.Errorf("open database: %w", err) + } + s.dbs[path] = db + + // Watch for changes on the database file & WAL. + if err := s.watcher.Add(filepath.Dir(path)); err != nil { + return fmt.Errorf("watch db file: %w", err) + } + + // Kick off an initial sync. + select { + case db.NotifyCh() <- struct{}{}: + default: + } + + return nil +} + +// Unwatch removes a database path from being managed by the server. +func (s *Server) Unwatch(path string) error { + s.mu.Lock() + defer s.mu.Unlock() + + db := s.dbs[path] + if db == nil { + return nil + } + delete(s.dbs, path) + + // Stop watching for changes on the database WAL. + if err := s.watcher.Remove(filepath.Dir(path)); err != nil { + return fmt.Errorf("unwatch file: %w", err) + } + + // Shut down database. + if err := db.Close(); err != nil { + return fmt.Errorf("close db: %w", err) + } + + return nil +} + +func (s *Server) isWatched(event fsnotify.Event) bool { + path := event.Name + path = strings.TrimSuffix(path, "-wal") + + if _, ok := s.dbs[path]; ok { + return true + } + return false +} + +// monitor runs in a separate goroutine and dispatches notifications to managed DBs. 
+func (s *Server) monitor(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case event := <-s.watcher.Events: + if !s.isWatched(event) { + continue + } + if err := s.dispatchFileEvent(ctx, event); err != nil { + return err + } + } + } +} + +// dispatchFileEvent dispatches a notification to the database which owns the file. +func (s *Server) dispatchFileEvent(ctx context.Context, event fsnotify.Event) error { + path := event.Name + path = strings.TrimSuffix(path, "-wal") + + db := s.DB(path) + if db == nil { + return nil + } + + // TODO: If deleted, remove from server and close DB. + + select { + case <-ctx.Done(): + return ctx.Err() + case db.NotifyCh() <- struct{}{}: + return nil // notify db + default: + return nil // already pending notification, skip + } +} diff --git a/server_test.go b/server_test.go new file mode 100644 index 00000000..3d7601f0 --- /dev/null +++ b/server_test.go @@ -0,0 +1 @@ +package litestream_test diff --git a/sftp/replica_client.go b/sftp/replica_client.go index 6b082b4a..269e8ca1 100644 --- a/sftp/replica_client.go +++ b/sftp/replica_client.go @@ -9,6 +9,7 @@ import ( "os" "path" "sort" + "strings" "sync" "time" @@ -40,6 +41,7 @@ type ReplicaClient struct { Password string Path string KeyPath string + HostKeyPath string DialTimeout time.Duration } @@ -50,12 +52,12 @@ func NewReplicaClient() *ReplicaClient { } } -// Type returns "gcs" as the client type. +// Type returns "sftp" as the client type. func (c *ReplicaClient) Type() string { return ReplicaClientType } -// Init initializes the connection to GCS. No-op if already initialized. +// Init initializes the connection to SFTP. No-op if already initialized. 
func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) { c.mu.Lock() defer c.mu.Unlock() @@ -70,14 +72,28 @@ func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) { // Build SSH configuration & auth methods config := &ssh.ClientConfig{ - User: c.User, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - BannerCallback: ssh.BannerDisplayStderr(), + User: c.User, + BannerCallback: ssh.BannerDisplayStderr(), } if c.Password != "" { config.Auth = append(config.Auth, ssh.Password(c.Password)) } + if c.HostKeyPath == "" { + config.HostKeyCallback = ssh.InsecureIgnoreHostKey() + } else { + buf, err := os.ReadFile(c.HostKeyPath) + if err != nil { + return nil, fmt.Errorf("cannot read sftp host key path: %w", err) + } + + key, _, _, _, err := ssh.ParseAuthorizedKey(buf) + if err != nil { + return nil, fmt.Errorf("cannot parse sftp host key path: path=%s len=%d err=%w", c.HostKeyPath, len(buf), err) + } + config.HostKeyCallback = ssh.FixedHostKey(key) + } + if c.KeyPath != "" { buf, err := os.ReadFile(c.KeyPath) if err != nil { @@ -121,7 +137,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error) return nil, err } - fis, err := sftpClient.ReadDir(litestream.GenerationsPath(c.Path)) + fis, err := sftpClient.ReadDir(path.Join(c.Path, "generations")) if os.IsNotExist(err) { return nil, nil } else if err != nil { @@ -153,12 +169,11 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) sftpClient, err := c.Init(ctx) if err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - dir, err := litestream.GenerationPath(c.Path, generation) - if err != nil { - return fmt.Errorf("cannot determine generation path: %w", err) - } + dir := path.Join(c.Path, "generations", generation) var dirs []string walker := sftpClient.Walk(dir) @@ -198,12 +213,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit sftpClient, 
err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.SnapshotsPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshots path: %w", err) - } + dir := path.Join(c.Path, "generations", generation, "snapshots") fis, err := sftpClient.ReadDir(dir) if os.IsNotExist(err) { @@ -216,7 +230,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit infos := make([]litestream.SnapshotInfo, 0, len(fis)) for _, fi := range fis { // Parse index from filename. - index, err := litestream.ParseSnapshotPath(path.Base(fi.Name())) + index, err := internal.ParseSnapshotPath(path.Base(fi.Name())) if err != nil { continue } @@ -241,12 +255,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in sftpClient, err := c.Init(ctx) if err != nil { return info, err + } else if generation == "" { + return info, fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return info, fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") startTime := time.Now() if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { @@ -257,20 +270,19 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in if err != nil { return info, fmt.Errorf("cannot open snapshot file for writing: %w", err) } - defer f.Close() + closer := internal.OnceCloser(f) + defer closer.Close() n, err := io.Copy(f, rd) if err != nil { return info, err - } else if err := f.Close(); err != nil { + } else if err := closer.Close(); err != nil { return info, err } internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc() internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, 
"PUT").Add(float64(n)) - // log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond)) - return litestream.SnapshotInfo{ Generation: generation, Index: index, @@ -286,12 +298,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return nil, fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") f, err := sftpClient.Open(filename) if err != nil { @@ -310,12 +321,11 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i sftpClient, err := c.Init(ctx) if err != nil { return err + } else if generation == "" { + return fmt.Errorf("generation required") } - filename, err := litestream.SnapshotPath(c.Path, generation, index) - if err != nil { - return fmt.Errorf("cannot determine snapshot path: %w", err) - } + filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4") if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete snapshot %q: %w", filename, err) @@ -332,12 +342,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if generation == "" { + return nil, fmt.Errorf("generation required") } - dir, err := litestream.WALPath(c.Path, generation) - if err != nil { - return nil, fmt.Errorf("cannot determine wal path: %w", err) - } + dir := path.Join(c.Path, "generations", generation, "wal") fis, err := sftpClient.ReadDir(dir) if os.IsNotExist(err) { @@ -347,25 
+356,18 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l } // Iterate over every file and convert to metadata. - infos := make([]litestream.WALSegmentInfo, 0, len(fis)) + indexes := make([]int, 0, len(fis)) for _, fi := range fis { - index, offset, err := litestream.ParseWALSegmentPath(path.Base(fi.Name())) - if err != nil { + index, err := litestream.ParseIndex(fi.Name()) + if err != nil || !fi.IsDir() { continue } - - infos = append(infos, litestream.WALSegmentInfo{ - Generation: generation, - Index: index, - Offset: offset, - Size: fi.Size(), - CreatedAt: fi.ModTime().UTC(), - }) + indexes = append(indexes, index) } - sort.Sort(litestream.WALSegmentInfoSlice(infos)) + sort.Ints(indexes) - return litestream.NewWALSegmentInfoSliceIterator(infos), nil + return newWALSegmentIterator(ctx, c, dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. @@ -375,12 +377,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, sftpClient, err := c.Init(ctx) if err != nil { return info, err + } else if pos.Generation == "" { + return info, fmt.Errorf("generation required") } - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return info, fmt.Errorf("cannot determine wal segment path: %w", err) - } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") startTime := time.Now() if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { @@ -391,12 +392,13 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, if err != nil { return info, fmt.Errorf("cannot open snapshot file for writing: %w", err) } - defer f.Close() + closer := internal.OnceCloser(f) + defer closer.Close() n, err := io.Copy(f, rd) if err != nil { return info, err - } else if err := f.Close(); err != nil { + } 
else if err := closer.Close(); err != nil { return info, err } @@ -420,12 +422,11 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos sftpClient, err := c.Init(ctx) if err != nil { return nil, err + } else if pos.Generation == "" { + return nil, fmt.Errorf("generation required") } - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return nil, fmt.Errorf("cannot determine wal segment path: %w", err) - } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") f, err := sftpClient.Open(filename) if err != nil { @@ -447,11 +448,12 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po } for _, pos := range a { - filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) - if err != nil { - return fmt.Errorf("cannot determine wal segment path: %w", err) + if pos.Generation == "" { + return fmt.Errorf("generation required") } + filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4") + if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete wal segment %q: %w", filename, err) } @@ -470,7 +472,7 @@ func (c *ReplicaClient) Cleanup(ctx context.Context) (err error) { return err } - if err := sftpClient.RemoveDirectory(litestream.GenerationsPath(c.Path)); err != nil && !os.IsNotExist(err) { + if err := sftpClient.RemoveDirectory(path.Join(c.Path, "generations")); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete generations path: %w", err) } else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) { return fmt.Errorf("cannot delete path: %w", err) @@ -493,3 +495,101 @@ func (c *ReplicaClient) resetOnConnError(err error) { c.sshClient = nil } 
}

// walSegmentIterator iterates over the WAL segment files stored for a single
// generation, ordered by index then offset. Metadata for one index directory
// at a time is cached in infos; directories are read lazily in Next.
type walSegmentIterator struct {
	ctx        context.Context
	client     *ReplicaClient
	dir        string
	generation string
	indexes    []int

	started bool // true once the first index directory has been read
	infos   []litestream.WALSegmentInfo
	err     error
}

// newWALSegmentIterator returns an iterator over the WAL segments under dir
// for the given generation. indexes is expected to be sorted ascending.
func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, dir, generation string, indexes []int) *walSegmentIterator {
	return &walSegmentIterator{
		ctx:        ctx,
		client:     client,
		dir:        dir,
		generation: generation,
		indexes:    indexes,
	}
}

// Close ends iteration and returns any error encountered while iterating.
func (itr *walSegmentIterator) Close() (err error) {
	return itr.err
}

// Next advances to the next WAL segment, reading the directory for the next
// index when the current cache is exhausted. It returns false when no
// segments remain or an error occurs (see Err).
func (itr *walSegmentIterator) Next() bool {
	sftpClient, err := itr.client.Init(itr.ctx)
	if err != nil {
		itr.err = err
		return false
	}

	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	for {
		// Move to the next segment in cache, if available.
		if len(itr.infos) > 1 {
			itr.infos = itr.infos[1:]
			return true
		}
		itr.infos = itr.infos[:0] // otherwise clear infos

		// Advance to the next index except on the very first read.
		// NOTE: the original used `itr.infos != nil` as the first-read
		// sentinel; when an index directory yields zero parseable segments
		// infos stays nil, the index never advances, and the loop re-reads
		// the same directory forever. The explicit flag fixes that.
		if itr.started && len(itr.indexes) > 0 {
			itr.indexes = itr.indexes[1:]
		}
		itr.started = true

		// If no indexes remain, stop iteration.
		if len(itr.indexes) == 0 {
			return false
		}

		// Read segments into a cache for the current index.
		index := itr.indexes[0]
		fis, err := sftpClient.ReadDir(path.Join(itr.dir, litestream.FormatIndex(index)))
		if err != nil {
			itr.err = err
			return false
		}

		for _, fi := range fis {
			filename := path.Base(fi.Name())
			if fi.IsDir() {
				continue
			}

			// Filenames are "<offset>.wal.lz4"; skip anything unparseable.
			offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4"))
			if err != nil {
				continue
			}

			itr.infos = append(itr.infos, litestream.WALSegmentInfo{
				Generation: itr.generation,
				Index:      index,
				Offset:     offset,
				Size:       fi.Size(),
				CreatedAt:  fi.ModTime().UTC(),
			})
		}

		if len(itr.infos) > 0 {
			return true
		}
	}
}

// Err returns the first error encountered during iteration, if any.
func (itr *walSegmentIterator) Err() error { return itr.err }

// WALSegment returns metadata for the segment at the current position.
// It returns the zero value if the iterator is exhausted.
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	if len(itr.infos) == 0 {
		return litestream.WALSegmentInfo{}
	}
	return itr.infos[0]
}
diff --git a/testdata/Makefile b/testdata/Makefile new file mode 100644 index 00000000..504fe254 --- /dev/null +++ b/testdata/Makefile @@ -0,0 +1,13 @@ +.PHONY: default +default: + make -C find-latest-generation/ok + make -C index-by-timestamp/no-wal + make -C index-by-timestamp/ok + make -C index-by-timestamp/snapshot-later-than-wal + make -C generation-time-bounds/ok + make -C generation-time-bounds/snapshots-only + make -C replica-client-time-bounds/ok + make -C snapshot-time-bounds/ok + make -C snapshot-index-by-timestamp/ok + make -C wal-time-bounds/ok + make -C wal-index-by-timestamp/ok diff --git a/testdata/find-latest-generation/no-generations/.gitignore b/testdata/find-latest-generation/no-generations/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/find-latest-generation/ok/Makefile b/testdata/find-latest-generation/ok/Makefile new file mode 100644 index 00000000..45a7e010 --- /dev/null +++ b/testdata/find-latest-generation/ok/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch
-ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 
b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/0000000000000bb8.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/generation-time-bounds/ok/Makefile b/testdata/generation-time-bounds/ok/Makefile new file mode 100644 index 00000000..7f2ad771 --- /dev/null +++ b/testdata/generation-time-bounds/ok/Makefile @@ -0,0 +1,8 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 
00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/generation-time-bounds/snapshots-only/Makefile b/testdata/generation-time-bounds/snapshots-only/Makefile new file mode 100644 index 00000000..aa5978bd --- /dev/null +++ b/testdata/generation-time-bounds/snapshots-only/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + diff --git 
a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore b/testdata/index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/index-by-timestamp/no-wal/Makefile b/testdata/index-by-timestamp/no-wal/Makefile new file mode 100644 index 00000000..87751339 --- /dev/null +++ b/testdata/index-by-timestamp/no-wal/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 + diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/no-wal/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/ok/Makefile b/testdata/index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..258f1e8c --- /dev/null +++ b/testdata/index-by-timestamp/ok/Makefile @@ -0,0 +1,11 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001040000 generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001050000 generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 + diff --git 
a/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000003/0000000000000000.wal.lz4 differ diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile b/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile new file mode 100644 index 00000000..9e0d3908 --- /dev/null +++ b/testdata/index-by-timestamp/snapshot-later-than-wal/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/index-by-timestamp/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore b/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 
00000000..75363409 Binary files /dev/null and b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 
new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore b/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/max-snapshot-index/ok/Makefile b/testdata/max-snapshot-index/ok/Makefile new file mode 100644 index 00000000..47e186bd --- /dev/null +++ b/testdata/max-snapshot-index/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git 
a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 differ diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git 
a/testdata/read-wal-fields/ok b/testdata/read-wal-fields/ok new file mode 100644 index 00000000..e019bfe2 Binary files /dev/null and b/testdata/read-wal-fields/ok differ diff --git a/testdata/replica-client-time-bounds/ok/Makefile b/testdata/replica-client-time-bounds/ok/Makefile new file mode 100644 index 00000000..47e186bd --- /dev/null +++ b/testdata/replica-client-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001010000 generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and 
b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/restore/bad-permissions/0000000000000000.db b/testdata/restore/bad-permissions/0000000000000000.db new file mode 100644 index 00000000..86bbea7e Binary files /dev/null and b/testdata/restore/bad-permissions/0000000000000000.db differ diff --git a/testdata/restore/bad-permissions/README b/testdata/restore/bad-permissions/README new file mode 100644 index 00000000..48c0fd4e --- /dev/null +++ b/testdata/restore/bad-permissions/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 0000000000000002.db + rm db* + diff --git a/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 
b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/restore/ok/0000000000000002.db b/testdata/restore/ok/0000000000000002.db new file mode 100644 index 00000000..cfd2b8d8 Binary files /dev/null and b/testdata/restore/ok/0000000000000002.db differ diff --git a/testdata/restore/ok/README b/testdata/restore/ok/README new file mode 100644 index 00000000..48c0fd4e --- /dev/null +++ b/testdata/restore/ok/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db generations/0000000000000000/snapshots/0000000000000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/0000000000000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 0000000000000002.db + rm db* + diff --git a/testdata/restore/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/restore/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git 
a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..37e1dcf9 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..3bd7ab70 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..c73bf2cb Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..64a4899b Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..2265d0e0 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git 
a/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 new file mode 100644 index 00000000..c7dc94ff Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 differ diff --git a/testdata/restore/snapshot-only/0000000000000000.db b/testdata/restore/snapshot-only/0000000000000000.db new file mode 100644 index 00000000..86bbea7e Binary files /dev/null and b/testdata/restore/snapshot-only/0000000000000000.db differ diff --git a/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/snapshot-index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore b/testdata/snapshot-index-by-timestamp/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/snapshot-index-by-timestamp/ok/Makefile b/testdata/snapshot-index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..a11b6dbd --- /dev/null +++ b/testdata/snapshot-index-by-timestamp/ok/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 
b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000003e8.snapshot.lz4 differ diff --git a/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-index-by-timestamp/ok/generations/0000000000000000/snapshots/00000000000007d0.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/snapshot-time-bounds/ok/Makefile b/testdata/snapshot-time-bounds/ok/Makefile new file mode 100644 index 00000000..87751339 --- /dev/null +++ b/testdata/snapshot-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 + diff --git 
a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000000.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000001.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/0000000000000002.snapshot.lz4 differ diff --git a/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and 
b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..d8c9ab6f Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..16be1892 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git 
a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..46d706b5 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..46d706b5 Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..5366ae2a Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 new file mode 100644 index 00000000..6fdb481a Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 
b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..d8c9ab6f Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..16be1892 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..5366ae2a Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 new file mode 100644 index 00000000..6fdb481a 
Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..16be1892 Binary files /dev/null and b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/0000000000000000.wal b/testdata/wal-downloader/ok/0000000000000000.wal new file mode 100644 index 00000000..c04d8f00 Binary files /dev/null and b/testdata/wal-downloader/ok/0000000000000000.wal differ diff --git a/testdata/wal-downloader/ok/0000000000000001.wal b/testdata/wal-downloader/ok/0000000000000001.wal new file mode 100644 index 00000000..1a59daa4 Binary files /dev/null and b/testdata/wal-downloader/ok/0000000000000001.wal differ diff --git a/testdata/wal-downloader/ok/0000000000000002.wal b/testdata/wal-downloader/ok/0000000000000002.wal new file mode 100644 index 00000000..e8bb5264 Binary files /dev/null and b/testdata/wal-downloader/ok/0000000000000002.wal differ diff --git a/testdata/wal-downloader/ok/README b/testdata/wal-downloader/ok/README new file mode 100644 index 00000000..c4e68f69 --- /dev/null +++ b/testdata/wal-downloader/ok/README @@ -0,0 +1,40 @@ +To reproduce this testdata, run sqlite3 
and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + +And copy & split the WAL into segments: + + sl3 split -o generations/0000000000000000/wal/0000000000000000 db-wal + cp db-wal 0000000000000000.wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + +And split again: + + sl3 split -o generations/0000000000000000/wal/0000000000000001 db-wal + cp db-wal 0000000000000001.wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + +And split again: + + sl3 split -o generations/0000000000000000/wal/0000000000000002 db-wal + cp db-wal 0000000000000002.wal + + +Finally, remove the original database files: + + rm db* + diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..d8c9ab6f Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..16be1892 Binary files /dev/null and 
b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..46d706b5 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 new file mode 100644 index 00000000..5366ae2a Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 new file mode 100644 index 00000000..6fdb481a Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/0000000000000002/0000000000001038.wal.lz4 differ diff --git a/testdata/wal-downloader/one/0000000000000000.wal b/testdata/wal-downloader/one/0000000000000000.wal new file mode 100644 index 00000000..c04d8f00 Binary files /dev/null and b/testdata/wal-downloader/one/0000000000000000.wal differ diff --git a/testdata/wal-downloader/one/README b/testdata/wal-downloader/one/README new file mode 100644 index 00000000..bcad8cf8 --- /dev/null +++ b/testdata/wal-downloader/one/README @@ -0,0 +1,17 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + +And copy & split the WAL into segments: + + sl3 split -o 
generations/0000000000000000/wal/0000000000000000 db-wal + cp db-wal 0000000000000000.wal + + +Finally, remove the original database files: + + rm db* + diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..03f56a31 Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 new file mode 100644 index 00000000..d8c9ab6f Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000002050.wal.lz4 differ diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 new file mode 100644 index 00000000..16be1892 Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/0000000000000000/0000000000003068.wal.lz4 differ diff --git a/testdata/wal-index-by-timestamp/ok/Makefile b/testdata/wal-index-by-timestamp/ok/Makefile new file mode 100644 index 00000000..40d692ff --- /dev/null +++ b/testdata/wal-index-by-timestamp/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 + TZ=UTC touch -ct 200001030000 generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + diff --git 
a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000000/0000000000001234.wal.lz4 differ diff --git a/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-index-by-timestamp/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-time-bounds/no-wal-segments/generations/0000000000000000/wal/.gitignore b/testdata/wal-time-bounds/no-wal-segments/generations/0000000000000000/wal/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/testdata/wal-time-bounds/ok/Makefile b/testdata/wal-time-bounds/ok/Makefile new file mode 100644 index 00000000..155d281d --- /dev/null +++ b/testdata/wal-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -ct 200001010000 generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 + TZ=UTC touch -ct 200001020000 generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 + TZ=UTC touch -ct 200001030000 
generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 + diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000000/0000000000000001.wal.lz4 differ diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 new file mode 100644 index 00000000..75363409 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/0000000000000001/0000000000000000.wal.lz4 differ diff --git a/testdata/wal-writer/live/README.md b/testdata/wal-writer/live/README.md new file mode 100644 index 00000000..23874310 --- /dev/null +++ b/testdata/wal-writer/live/README.md @@ -0,0 +1,19 @@ +WAL Writer Live +================= + +This test is to ensure we can copy a WAL file into place with a live DB and +trigger a checkpoint into the main DB file. 
+ +To reproduce the data files: + +```sh +$ sqlite3 db + +sqlite> PRAGMA journal_mode = 'wal'; +sqlite> CREATE TABLE t (x); +sqlite> PRAGMA wal_checkpoint(TRUNCATE); +sqlite> INSERT INTO t (x) VALUES (1); + +sqlite> CTRL-\ +``` + diff --git a/testdata/wal-writer/live/db b/testdata/wal-writer/live/db new file mode 100644 index 00000000..6a634477 Binary files /dev/null and b/testdata/wal-writer/live/db differ diff --git a/testdata/wal-writer/live/db-shm b/testdata/wal-writer/live/db-shm new file mode 100644 index 00000000..1d5fdd8e Binary files /dev/null and b/testdata/wal-writer/live/db-shm differ diff --git a/testdata/wal-writer/live/db-wal b/testdata/wal-writer/live/db-wal new file mode 100644 index 00000000..43300fc8 Binary files /dev/null and b/testdata/wal-writer/live/db-wal differ diff --git a/testdata/wal-writer/static/README.md b/testdata/wal-writer/static/README.md new file mode 100644 index 00000000..99ffadb8 --- /dev/null +++ b/testdata/wal-writer/static/README.md @@ -0,0 +1,26 @@ +WAL Writer Static +================= + +This test is to ensure that WALWriter will generate the same WAL file as +the `sqlite3` command line. 
+ +To reproduce the data file: + +```sh +$ sqlite3 db + +sqlite> PRAGMA journal_mode = 'wal'; + +sqlite> CREATE TABLE t (x); + +sqlite> INSERT INTO t (x) VALUES (1); + +sqlite> CTRL-\ +``` + +then remove the db & shm files: + +```sh +$ rm db db-shm +``` + diff --git a/testdata/wal-writer/static/db-wal b/testdata/wal-writer/static/db-wal new file mode 100644 index 00000000..5cac19ea Binary files /dev/null and b/testdata/wal-writer/static/db-wal differ diff --git a/wal_downloader.go b/wal_downloader.go new file mode 100644 index 00000000..d051e170 --- /dev/null +++ b/wal_downloader.go @@ -0,0 +1,335 @@ +package litestream + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" + "golang.org/x/sync/errgroup" +) + +// WALDownloader represents a parallel downloader of WAL files from a replica client. +// +// It works on a per-index level so WAL files are always downloaded in their +// entirety and are not segmented. WAL files are downloaded from minIndex to +// maxIndex, inclusively, and are written to a path prefix. WAL files are named +// with the prefix and suffixed with the WAL index. It is the responsibility of +// the caller to clean up these WAL files. +// +// The purpose of the parallelization is that RTT & WAL apply time can consume +// much of the restore time so it's useful to download multiple WAL files in +// the background to minimize the latency. While some WAL indexes may be +// downloaded out of order, the WALDownloader ensures that Next() always +// returns the WAL files sequentially. 
+type WALDownloader struct { + ctx context.Context // context used for early close/cancellation + cancel func() + + client ReplicaClient // client to read WAL segments with + generation string // generation to download WAL files from + minIndex int // starting WAL index (inclusive) + maxIndex int // ending WAL index (inclusive) + prefix string // output file prefix + + err error // error occuring during init, propagated to Next() + n int // number of WAL files returned by Next() + + // Concurrency coordination + mu sync.Mutex // used to serialize sending of next WAL index + cond *sync.Cond // used with mu above + g *errgroup.Group // manages worker goroutines for downloading + input chan walDownloadInput // holds ordered WAL indices w/ offsets + output chan walDownloadOutput // always sends next sequential WAL; used by Next() + nextIndex int // tracks next WAL index to send to output channel + + // File info used for downloaded WAL files. + Mode os.FileMode + Uid, Gid int + + // Number of downloads occurring in parallel. + Parallelism int +} + +// NewWALDownloader returns a new instance of WALDownloader. +func NewWALDownloader(client ReplicaClient, prefix string, generation string, minIndex, maxIndex int) *WALDownloader { + d := &WALDownloader{ + client: client, + prefix: prefix, + generation: generation, + minIndex: minIndex, + maxIndex: maxIndex, + + Mode: 0600, + Parallelism: 1, + } + + d.ctx, d.cancel = context.WithCancel(context.Background()) + d.cond = sync.NewCond(&d.mu) + + return d +} + +// Close cancels all downloads and returns any error that has occurred. +func (d *WALDownloader) Close() (err error) { + if d.err != nil { + err = d.err + } + + d.cancel() + + if d.g != nil { + if e := d.g.Wait(); err != nil && e != context.Canceled { + err = e + } + } + return err +} + +// init initializes the downloader on the first invocation only. 
It generates +// the input channel with all WAL indices & offsets needed, it initializes +// the output channel that Next() waits on, and starts the worker goroutines +// that begin downloading WAL files in the background. +func (d *WALDownloader) init(ctx context.Context) error { + if d.input != nil { + return nil // already initialized + } else if d.minIndex < 0 { + return fmt.Errorf("minimum index required") + } else if d.maxIndex < 0 { + return fmt.Errorf("maximum index required") + } else if d.maxIndex < d.minIndex { + return fmt.Errorf("minimum index cannot be larger than maximum index") + } else if d.Parallelism < 1 { + return fmt.Errorf("parallelism must be at least one") + } + + // Populate input channel with indices & offsets. + if err := d.initInputCh(ctx); err != nil { + return err + } + d.nextIndex = d.minIndex + + // Generate output channel that Next() pulls from. + d.output = make(chan walDownloadOutput) + + // Spawn worker goroutines to download WALs. + d.g, d.ctx = errgroup.WithContext(d.ctx) + for i := 0; i < d.Parallelism; i++ { + d.g.Go(func() error { return d.downloader(d.ctx) }) + } + + return nil +} + +// initInputCh populates the input channel with each WAL index between minIndex +// and maxIndex. It also includes all offsets needed with the index. +func (d *WALDownloader) initInputCh(ctx context.Context) error { + itr, err := d.client.WALSegments(ctx, d.generation) + if err != nil { + return fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + d.input = make(chan walDownloadInput, d.maxIndex-d.minIndex+1) + defer close(d.input) + + index := d.minIndex - 1 + var offsets []int64 + for itr.Next() { + info := itr.WALSegment() + + // Restrict segments to within our index range. + if info.Index < d.minIndex { + continue // haven't reached minimum index, skip + } else if info.Index > d.maxIndex { + break // after max index, stop + } + + // Flush index & offsets when index changes. 
+ if info.Index != index { + if info.Index != index+1 { // must be sequential + return &WALNotFoundError{Generation: d.generation, Index: index + 1} + } + + if len(offsets) > 0 { + d.input <- walDownloadInput{index: index, offsets: offsets} + offsets = make([]int64, 0) + } + + index = info.Index + } + + // Append to the end of the WAL file. + offsets = append(offsets, info.Offset) + } + + // Ensure we read to the last index. + if index != d.maxIndex { + return &WALNotFoundError{Generation: d.generation, Index: index + 1} + } + + // Flush if we have remaining offsets. + if len(offsets) > 0 { + d.input <- walDownloadInput{index: index, offsets: offsets} + } + + return itr.Close() +} + +// N returns the number of WAL files returned by Next(). +func (d *WALDownloader) N() int { return d.n } + +// Next returns the index & local file path of the next downloaded WAL file. +func (d *WALDownloader) Next(ctx context.Context) (int, string, error) { + if d.err != nil { + return 0, "", d.err + } else if d.err = d.init(ctx); d.err != nil { + return 0, "", d.err + } + + select { + case <-ctx.Done(): + return 0, "", ctx.Err() + case <-d.ctx.Done(): + return 0, "", d.ctx.Err() + case v, ok := <-d.output: + if !ok { + return 0, "", io.EOF + } + + d.n++ + return v.index, v.path, v.err + } +} + +// downloader runs in a separate goroutine and downloads the next input index. +func (d *WALDownloader) downloader(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + d.cond.Broadcast() + return ctx.Err() + + case input, ok := <-d.input: + if !ok { + return nil // no more input + } + + // Wait until next index equals input index and then send file to + // output to ensure sorted order. + if err := func() error { + walPath, err := d.downloadWAL(ctx, input.index, input.offsets) + + d.mu.Lock() + defer d.mu.Unlock() + + // Notify other downloader goroutines when we escape this + // anonymous function. 
+ defer d.cond.Broadcast() + + // Keep looping until our index matches the next index to send. + for d.nextIndex != input.index { + if ctxErr := ctx.Err(); ctxErr != nil { + return ctxErr + } + d.cond.Wait() + } + + // Still under lock, wait until Next() requests next index. + select { + case <-ctx.Done(): + return ctx.Err() + + case d.output <- walDownloadOutput{ + index: input.index, + path: walPath, + err: err, + }: + // At the last index, close out output channel to notify + // the Next() method to return io.EOF. + if d.nextIndex == d.maxIndex { + close(d.output) + return nil + } + + // Update next expected index now that our send is successful. + d.nextIndex++ + } + + return err + }(); err != nil { + return err + } + } + } +} + +// downloadWAL sequentially downloads all the segments for WAL index from the +// replica client and appends them to a single on-disk file. Returns the name +// of the on-disk file on success. +func (d *WALDownloader) downloadWAL(ctx context.Context, index int, offsets []int64) (string, error) { + // Open handle to destination WAL path. + walPath := fmt.Sprintf("%s-%s-wal", d.prefix, FormatIndex(index)) + f, err := internal.CreateFile(walPath, d.Mode, d.Uid, d.Gid) + if err != nil { + return "", err + } + defer f.Close() + + // Open readers for every segment in the WAL file, in order. + var written int64 + for _, offset := range offsets { + if err := func() error { + // Ensure next offset is our current position in the file. 
+ if written != offset { + return fmt.Errorf("missing WAL offset: generation=%s index=%s offset=%s", d.generation, FormatIndex(index), FormatOffset(written)) + } + + rd, err := d.client.WALSegmentReader(ctx, Pos{Generation: d.generation, Index: index, Offset: offset}) + if err != nil { + return fmt.Errorf("read WAL segment: %w", err) + } + defer rd.Close() + + n, err := io.Copy(f, lz4.NewReader(rd)) + if err != nil { + return fmt.Errorf("copy WAL segment: %w", err) + } + written += n + + return nil + }(); err != nil { + return "", err + } + } + + if err := f.Close(); err != nil { + return "", err + } + return walPath, nil +} + +type walDownloadInput struct { + index int + offsets []int64 +} + +type walDownloadOutput struct { + path string + index int + err error +} + +// WALNotFoundError is returned by WALDownloader if an WAL index is not found. +type WALNotFoundError struct { + Generation string + Index int +} + +// Error returns the error string. +func (e *WALNotFoundError) Error() string { + return fmt.Sprintf("wal not found: generation=%s index=%s", e.Generation, FormatIndex(e.Index)) +} diff --git a/wal_downloader_test.go b/wal_downloader_test.go new file mode 100644 index 00000000..f467a2e6 --- /dev/null +++ b/wal_downloader_test.go @@ -0,0 +1,535 @@ +package litestream_test + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/internal/testingutil" + "github.com/benbjohnson/litestream/mock" +) + +// TestWALDownloader runs downloader tests against different levels of parallelism. +func TestWALDownloader(t *testing.T) { + for _, parallelism := range []int{1, 8, 1024} { + t.Run(fmt.Sprint(parallelism), func(t *testing.T) { + testWALDownloader(t, parallelism) + }) + } +} + +func testWALDownloader(t *testing.T, parallelism int) { + // Ensure WAL files can be downloaded from file replica on disk. 
+ t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 3; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + + if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a single WAL index can be downloaded. 
+ t.Run("One", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "one") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded. + t.Run("Slice", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded starting from zero. 
+ t.Run("SliceLeft", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded ending at the last index. 
+ t.Run("SliceRight", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a large, generated set of WAL files can be downloaded in the correct order. + t.Run("Large", func(t *testing.T) { + if testing.Short() { + t.Skip("short mode, skipping") + } + + // Generate WAL files. 
+ const n = 1000 + tempDir := t.TempDir() + for i := 0; i < n; i++ { + filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", litestream.FormatIndex(i), "0000000000000000.wal.lz4") + if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil { + t.Fatal(err) + } else if err := os.WriteFile(filename, testingutil.CompressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil { + t.Fatal(err) + } + } + + client := litestream.NewFileReplicaClient(tempDir) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, n-1) + d.Parallelism = parallelism + defer d.Close() + + for i := 0; i < n; i++ { + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, i; got != want { + t.Fatalf("index[%d]=%d, want %d", i, got, want) + } else if buf, err := os.ReadFile(filename); err != nil { + t.Fatal(err) + } else if got, want := fmt.Sprint(i), string(buf); got != want { + t.Fatalf("file[%d]=%q, want %q", i, got, want) + } + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a non-existent WAL directory returns error. 
+ t.Run("ErrEmptyGenerationDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-generation-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + + // Reinvoking Next() should return the same error. + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + + // Close should return the same error. + if err := d.Close(); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure an empty WAL directory returns error. 
+ t.Run("EmptyWALDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + }) + + // Ensure an empty WAL index directory returns EOF. + t.Run("EmptyWALIndexDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-index-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + }) + + // Ensure closing downloader before calling Next() does not panic. + t.Run("CloseWithoutNext", func(t *testing.T) { + client := litestream.NewFileReplicaClient(t.TempDir()) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader closes successfully if invoked after Next() but before last index. 
+ t.Run("CloseEarly", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "0000000000000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if err := d.Close(); err != nil { + t.Fatal(err) + } + + if _, _, err := d.Next(context.Background()); err == nil { + t.Fatal("expected error") + } + }) + + // Ensure downloader without a minimum index returns an error. + t.Run("ErrMinIndexRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", -1, 2) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader without a maximum index returns an error. + t.Run("ErrMinIndexRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 1, -1) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `maximum index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader with invalid min/max indexes returns an error. 
+ t.Run("ErrMinIndexTooLarge", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 2, 1) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index cannot be larger than maximum index` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error if parallelism field is invalid. + t.Run("ErrParallelismRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 0, 0) + d.Parallelism = -1 + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `parallelism must be at least one` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a missing index at the beginning returns an error. + t.Run("ErrMissingInitialIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-initial-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a gap in indices returns an error. 
+ t.Run("ErrMissingMiddleIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-middle-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 1}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a missing index at the end returns an error. + t.Run("ErrMissingEndingIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-ending-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 2}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error WAL segment iterator creation returns error. + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, errors.New("marker") + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error if WAL segments have a gap in offsets. 
+ t.Run("ErrMissingOffset", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-offset") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `missing WAL offset: generation=0000000000000000 index=0000000000000000 offset=0000000000002050` { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if context is canceled. + t.Run("ErrContextCanceled", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "ok")) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + defer d.Close() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + if _, _, err := d.Next(ctx); err != context.Canceled { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while writing WAL to disk. + t.Run("ErrWriteWAL", func(t *testing.T) { + // Create a subdirectory that is not writable. + tempDir := t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "nowrite"), 0000); err != nil { + t.Fatal(err) + } + + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-write-wal")) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "nowrite", "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while downloading WAL. 
+	t.Run("ErrDownloadWAL", func(t *testing.T) {
+		fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-download-wal"))
+
+		// Mock client delegates segment listing to the file client but
+		// always fails when opening a segment reader, so Next() must
+		// surface the read error.
+		var client mock.ReplicaClient
+		client.WALSegmentsFunc = fileClient.WALSegments
+		client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
+			return nil, fmt.Errorf("marker")
+		}
+
+		d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0)
+		defer d.Close()
+
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `read WAL segment: marker` {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if err := d.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	// Ensure downloader returns error if reading the segment fails.
+	t.Run("ErrReadWALSegment", func(t *testing.T) {
+		fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-read-wal-segment"))
+
+		// Reader opens successfully but every Read() call fails, so the
+		// error must be reported from the copy stage rather than the open.
+		var client mock.ReplicaClient
+		client.WALSegmentsFunc = fileClient.WALSegments
+		client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
+			var rc mock.ReadCloser
+			rc.ReadFunc = func([]byte) (int, error) { return 0, errors.New("marker") }
+			rc.CloseFunc = func() error { return nil }
+			return &rc, nil
+		}
+
+		d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0)
+		defer d.Close()
+
+		if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `copy WAL segment: marker` {
+			t.Fatalf("unexpected error: %#v", err)
+		} else if err := d.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// TestWALNotFoundError verifies WALNotFoundError.Error() formatting: the
+// generation is rendered verbatim and the index as 16-digit zero-padded
+// hex (1000 -> 00000000000003e8).
+func TestWALNotFoundError(t *testing.T) {
+	err := &litestream.WALNotFoundError{Generation: "0123456789abcdef", Index: 1000}
+	if got, want := err.Error(), `wal not found: generation=0123456789abcdef index=00000000000003e8`; got != want {
+		t.Fatalf("Error()=%q, want %q", got, want)
+	}
+}
diff --git a/wal_writer.go b/wal_writer.go
new file mode 100644
index 00000000..0cd90c58
--- /dev/null
+++ b/wal_writer.go
@@ -0,0 +1,103 @@
+package litestream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// WALWriter represents a writer to a SQLite WAL file.
+// It writes the 32-byte WAL header followed by frames (24-byte frame header
+// plus one page each), maintaining the running checksum across all writes.
+type WALWriter struct {
+	path     string
+	mode     os.FileMode
+	pageSize int
+
+	f   *os.File // WAL file handle
+	buf []byte   // frame buffer, reused for each frame (frame header + one page)
+
+	chksum0, chksum1 uint32 // running checksum, carried from the header through every frame
+
+	// Salt values copied into the WAL header and into each frame header.
+	// Must be set before WriteHeader() is called.
+	Salt0, Salt1 uint32
+}
+
+// NewWALWriter returns a new instance of WALWriter.
+// pageSize must match the page size of the database the WAL belongs to.
+func NewWALWriter(path string, mode os.FileMode, pageSize int) *WALWriter {
+	return &WALWriter{
+		path:     path,
+		mode:     mode,
+		pageSize: pageSize,
+
+		buf: make([]byte, WALFrameHeaderSize+pageSize),
+	}
+}
+
+// Open opens the file handle to the WAL file.
+// NOTE(review): opens with O_WRONLY|O_TRUNC but no O_CREATE, so the file
+// must already exist and is truncated on open — confirm this is intended
+// for all callers (TestWALWriter_Static pre-creates the file to satisfy it).
+func (w *WALWriter) Open() (err error) {
+	w.f, err = os.OpenFile(w.path, os.O_WRONLY|os.O_TRUNC, w.mode)
+	return err
+}
+
+// Close closes the file handle to the WAL file.
+// It is a no-op if the writer was never opened.
+func (w *WALWriter) Close() error {
+	if w.f == nil {
+		return nil
+	}
+	return w.f.Close()
+}
+
+// WriteHeader writes the WAL header to the beginning of the file.
+// It also seeds the running checksum used by subsequent WriteFrame calls,
+// so it must be called exactly once before any frames are written.
+func (w *WALWriter) WriteHeader() error {
+	// Build WAL header byte slice. Page size and checksum set afterward.
+	hdr := []byte{
+		0x37, 0x7f, 0x06, 0x82, // magic (little-endian)
+		0x00, 0x2d, 0xe2, 0x18, // file format version (3007000)
+		0x00, 0x00, 0x00, 0x00, // page size
+		0x00, 0x00, 0x00, 0x00, // checkpoint sequence number
+		0x00, 0x00, 0x00, 0x00, // salt
+		0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, // checksum
+		0x00, 0x00, 0x00, 0x00,
+	}
+
+	// Set page size on header
+	binary.BigEndian.PutUint32(hdr[8:], uint32(w.pageSize))
+
+	// Set salt
+	binary.BigEndian.PutUint32(hdr[16:], w.Salt0)
+	binary.BigEndian.PutUint32(hdr[20:], w.Salt1)
+
+	// Compute header checksum over the first 24 bytes (everything before
+	// the checksum fields themselves).
+	w.chksum0, w.chksum1 = Checksum(binary.LittleEndian, w.chksum0, w.chksum1, hdr[:24])
+	binary.BigEndian.PutUint32(hdr[24:], w.chksum0)
+	binary.BigEndian.PutUint32(hdr[28:], w.chksum1)
+
+	// Write header to WAL.
+	_, err := w.f.Write(hdr)
+	return err
+}
+
+// WriteFrame appends one frame (frame header + page data) to the WAL.
+// pgno is the database page number; commit holds the database size in pages
+// for a commit frame (zero otherwise). len(data) must equal the configured
+// page size.
+func (w *WALWriter) WriteFrame(pgno, commit uint32, data []byte) error {
+	// Ensure data matches page size.
+	if len(data) != w.pageSize {
+		return fmt.Errorf("data size %d must match page size %d", len(data), w.pageSize)
+	}
+
+	// Write frame header.
+	binary.BigEndian.PutUint32(w.buf[0:], pgno)    // page number
+	binary.BigEndian.PutUint32(w.buf[4:], commit)  // commit record (page count)
+	binary.BigEndian.PutUint32(w.buf[8:], w.Salt0) // salt
+	binary.BigEndian.PutUint32(w.buf[12:], w.Salt1)
+
+	// Copy data to frame.
+	copy(w.buf[WALFrameHeaderSize:], data)
+
+	// Compute checksum for frame: it covers the first 8 bytes of the frame
+	// header plus the page data (bytes 8-23 holding salt & checksum are
+	// excluded), continuing the running checksum from the previous write.
+	w.chksum0, w.chksum1 = Checksum(binary.LittleEndian, w.chksum0, w.chksum1, w.buf[:8])
+	w.chksum0, w.chksum1 = Checksum(binary.LittleEndian, w.chksum0, w.chksum1, w.buf[24:])
+	binary.BigEndian.PutUint32(w.buf[16:], w.chksum0)
+	binary.BigEndian.PutUint32(w.buf[20:], w.chksum1)
+
+	// Write to local WAL
+	_, err := w.f.Write(w.buf)
+	return err
+}
diff --git a/wal_writer_test.go b/wal_writer_test.go
new file mode 100644
index 00000000..8a38085d
--- /dev/null
+++ b/wal_writer_test.go
@@ -0,0 +1,116 @@
+package litestream_test
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/binary"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/benbjohnson/litestream"
+	"github.com/benbjohnson/litestream/internal/testingutil"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// TestWALWriter_Static re-emits a sqlite3-generated WAL through WALWriter
+// and verifies the output is byte-identical to the original file.
+func TestWALWriter_Static(t *testing.T) {
+	testDir := filepath.Join("testdata", "wal-writer", "static")
+	tempDir := t.TempDir()
+
+	// Read in WAL file generated by sqlite3
+	buf, err := os.ReadFile(filepath.Join(testDir, "db-wal"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new WAL file.
+	// Pre-create an empty file: WALWriter.Open() does not create the file
+	// itself (no O_CREATE flag).
+	if err := os.WriteFile(filepath.Join(tempDir, "db-wal"), nil, 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	// Reuse the salt values from the original WAL header so the checksums
+	// (which are salted) come out identical.
+	w := litestream.NewWALWriter(filepath.Join(tempDir, "db-wal"), 0666, 4096)
+	w.Salt0 = binary.BigEndian.Uint32(buf[16:])
+	w.Salt1 = binary.BigEndian.Uint32(buf[20:])
+
+	if err := w.Open(); err != nil {
+		t.Fatal(err)
+	} else if err := w.WriteHeader(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Replay each frame of the original WAL through the writer. Each frame
+	// is a frame header followed by one 4096-byte page.
+	for b := buf[litestream.WALHeaderSize:]; len(b) > 0; b = b[litestream.WALFrameHeaderSize+4096:] {
+		pgno := binary.BigEndian.Uint32(b[0:])
+		commit := binary.BigEndian.Uint32(b[4:])
+		if err := w.WriteFrame(pgno, commit, b[litestream.WALFrameHeaderSize:][:4096]); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Read generated WAL and compare with original.
+	if buf2, err := os.ReadFile(filepath.Join(tempDir, "db-wal")); err != nil {
+		t.Fatal(err)
+	} else if !bytes.Equal(buf, buf2) {
+		t.Fatal("wal file mismatch")
+	}
+}
+
+// TestWALWriter_Live verifies that a WAL dropped next to a live database
+// becomes visible to SQLite once the shm index headers are invalidated.
+// NOTE(review): this test never invokes WALWriter directly; presumably
+// testdata/wal-writer/live/db-wal was produced by it — confirm.
+func TestWALWriter_Live(t *testing.T) {
+	testDir := filepath.Join("testdata", "wal-writer", "live")
+	tempDir := t.TempDir()
+
+	// Copy DB file into temporary dir.
+	testingutil.CopyFile(t, filepath.Join(testDir, "db"), filepath.Join(tempDir, "db"))
+
+	// Open database.
+	db, err := sql.Open("sqlite3", filepath.Join(tempDir, "db"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer db.Close()
+
+	// Verify that table is empty.
+	var n int
+	if err := db.QueryRow(`SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
+		t.Fatal(err)
+	} else if got, want := n, 0; got != want {
+		t.Fatalf("init: n=%d, want %d", got, want)
+	}
+
+	// Copy WAL file into place.
+	testingutil.CopyFile(t, filepath.Join(testDir, "db-wal"), filepath.Join(tempDir, "db-wal"))
+
+	// Invalidate both copies of the WAL index headers.
+	f, err := os.OpenFile(filepath.Join(tempDir, "db-shm"), os.O_RDWR, 0666)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
+	// Read index header.
+	idx := make([]byte, 136)
+	if _, err := io.ReadFull(f, idx); err != nil {
+		t.Fatal(err)
+	}
+
+	// Invalidate "isInit" flags: byte offset 12 within each of the two
+	// 48-byte header copies.
+	idx[12], idx[48+12] = 0, 0
+
+	// Write header back into index.
+	if _, err := f.Seek(0, io.SeekStart); err != nil {
+		t.Fatal(err)
+	} else if _, err := f.Write(idx); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify that table now has one row, i.e. the row committed in the
+	// copied WAL is visible to the query.
+	if err := db.QueryRow(`SELECT COUNT(*) FROM t`).Scan(&n); err != nil {
+		t.Fatal(err)
+	} else if got, want := n, 1; got != want {
+		t.Fatalf("post-wal: n=%d, want %d", got, want)
+	}
+}