diff --git a/.codecov.yml b/.codecov.yml index 58e1eb75695..99f65b0154a 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -21,7 +21,9 @@ coverage: project: default: enabled: yes - target: 95% + # Temporarily lower threshold below 95% + # Tracked in https://github.com/jaegertracing/jaeger/issues/5194 + target: 94.4% patch: default: enabled: yes diff --git a/.github/actions/block-pr-not-on-main/action.yml b/.github/actions/block-pr-not-on-main/action.yml new file mode 100644 index 00000000000..d37fb92990c --- /dev/null +++ b/.github/actions/block-pr-not-on-main/action.yml @@ -0,0 +1,17 @@ +name: 'block-pr-not-on-main' +description: 'Blocks PRs from main branch of forked repository' +runs: + using: "composite" + steps: + - name: Ensure PR is not on main branch + shell: bash + run: | + echo "Repo: ${{ github.repository }}" + echo "Head Repo: ${{ github.event.pull_request.head.repo.full_name }}" + echo "Forked: ${{ github.event.pull_request.head.repo.fork }}" + echo "Branch: ${{ github.event.pull_request.head.ref }}" + + if [ "${{ github.event.pull_request.head.repo.fork }}" == "true" ] && [ "${{ github.event.pull_request.head.ref }}" == 'main' ]; then + echo "PRs from the main branch of forked repositories are not allowed." + exit 1 + fi diff --git a/.github/workflows/ci-all-in-one-build.yml b/.github/workflows/ci-all-in-one-build.yml index 0d29acfd486..6d7785a776c 100644 --- a/.github/workflows/ci-all-in-one-build.yml +++ b/.github/workflows/ci-all-in-one-build.yml @@ -34,7 +34,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true diff --git a/.github/workflows/ci-grpc-badger.yml b/.github/workflows/ci-badger.yaml similarity index 66% rename from .github/workflows/ci-grpc-badger.yml rename to .github/workflows/ci-badger.yaml index 265d170491f..b6b7cd22f32 100644 --- a/.github/workflows/ci-grpc-badger.yml +++ b/.github/workflows/ci-badger.yaml @@ -1,4 +1,4 @@ -name: CIT gRPC And Badger +name: CIT Badger on: push: @@ -16,34 +16,43 @@ permissions: # added using https://github.com/step-security/secure-workflows contents: read jobs: - grpc-and-badger: + badger: runs-on: ubuntu-latest + strategy: + matrix: + version: [v1, v2] steps: - name: Harden Runner uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0 with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: 1.22.x - name: Run Badger storage integration tests - run: make badger-storage-integration-test - - - name: Run gRPC storage integration tests - run: make grpc-storage-integration-test + run: | + case ${{ matrix.version }} in + v1) + make badger-storage-integration-test + ;; + v2) + STORAGE=badger \ + make jaeger-v2-storage-integration-test + ;; + esac - name: Setup CODECOV_TOKEN uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: - files: cover.out,cover-badger.out + files: cover.out verbose: true - flags: grpc-badger + flags: badger 
fail_ci_if_error: true token: ${{ env.CODECOV_TOKEN }} diff --git a/.github/workflows/ci-build-binaries.yml b/.github/workflows/ci-build-binaries.yml index cfba6917c4e..2af5bc22d01 100644 --- a/.github/workflows/ci-build-binaries.yml +++ b/.github/workflows/ci-build-binaries.yml @@ -42,7 +42,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true diff --git a/.github/workflows/ci-cassandra.yml b/.github/workflows/ci-cassandra.yml index efd4a1d0ea3..396b066420b 100644 --- a/.github/workflows/ci-cassandra.yml +++ b/.github/workflows/ci-cassandra.yml @@ -36,7 +36,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: @@ -49,7 +49,7 @@ jobs: uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: file: cover.out verbose: true diff --git a/.github/workflows/ci-crossdock.yml b/.github/workflows/ci-crossdock.yml index 5e85b760499..e7b50d941b6 100644 --- a/.github/workflows/ci-crossdock.yml +++ b/.github/workflows/ci-crossdock.yml @@ -25,7 +25,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true diff --git a/.github/workflows/ci-docker-build.yml b/.github/workflows/ci-docker-build.yml index 248b7252d40..3cf2a2f4e4c 100644 --- a/.github/workflows/ci-docker-build.yml +++ b/.github/workflows/ci-docker-build.yml @@ -25,7 +25,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true diff --git a/.github/workflows/ci-elasticsearch.yml b/.github/workflows/ci-elasticsearch.yml index f9cd20c6eec..28301349b20 100644 --- a/.github/workflows/ci-elasticsearch.yml +++ b/.github/workflows/ci-elasticsearch.yml @@ -40,7 +40,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true @@ -64,7 +64,7 @@ jobs: uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: files: cover.out,cover-index-cleaner.out,cover-index-rollover.out verbose: true diff --git a/.github/workflows/ci-grpc.yml b/.github/workflows/ci-grpc.yml new file mode 100644 index 00000000000..1565a8f3009 --- /dev/null +++ b/.github/workflows/ci-grpc.yml @@ -0,0 +1,60 @@ +name: CIT gRPC + +on: + push: + branches: [main] + + pull_request: + branches: 
[main] + +concurrency: + group: ${{ github.workflow }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} + cancel-in-progress: true + +# See https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions +permissions: # added using https://github.com/step-security/secure-workflows + contents: read + +jobs: + grpc: + runs-on: ubuntu-latest + strategy: + matrix: + version: [v1, v2] + steps: + - name: Harden Runner + uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0 + with: + egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs + + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: 1.22.x + + - name: Run gRPC storage integration tests + run: | + case ${{ matrix.version }} in + v1) + SPAN_STORAGE_TYPE=memory \ + make grpc-storage-integration-test + ;; + v2) + STORAGE=grpc \ + SPAN_STORAGE_TYPE=memory \ + make jaeger-v2-storage-integration-test + ;; + esac + + - name: Setup CODECOV_TOKEN + uses: ./.github/actions/setup-codecov + + - name: Upload coverage to codecov + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 + with: + files: cover.out + verbose: true + flags: grpc + fail_ci_if_error: true + token: ${{ env.CODECOV_TOKEN }} diff --git a/.github/workflows/ci-hotrod.yml b/.github/workflows/ci-hotrod.yml index 3f559b3e82a..29b6a25e481 100644 --- a/.github/workflows/ci-hotrod.yml +++ b/.github/workflows/ci-hotrod.yml @@ -24,7 +24,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true diff --git a/.github/workflows/ci-kafka.yml b/.github/workflows/ci-kafka.yml index 69617f23e9b..6b46dd84592 100644 --- a/.github/workflows/ci-kafka.yml +++ b/.github/workflows/ci-kafka.yml @@ -24,7 +24,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: @@ -41,7 +41,7 @@ jobs: uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: files: cover.out verbose: true diff --git a/.github/workflows/ci-label-check.yml b/.github/workflows/ci-label-check.yml index 943d9135ed6..0fd32fbd4fd 100644 --- a/.github/workflows/ci-label-check.yml +++ b/.github/workflows/ci-label-check.yml @@ -10,6 +10,9 @@ on: - labeled - unlabeled +permissions: + contents: read + jobs: check-label: runs-on: ubuntu-latest diff --git a/.github/workflows/ci-lint-checks.yaml b/.github/workflows/ci-lint-checks.yaml index 1525be60037..616495c67b0 100644 --- a/.github/workflows/ci-lint-checks.yaml +++ b/.github/workflows/ci-lint-checks.yaml @@ -24,7 +24,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after a couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # 
v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: @@ -35,3 +35,6 @@ jobs: - name: Lint run: make lint + + - name: Ensure PR is not on main branch + uses: ./.github/actions/block-pr-not-on-main \ No newline at end of file diff --git a/.github/workflows/ci-opensearch.yml b/.github/workflows/ci-opensearch.yml index 158845e8100..92ec85222de 100644 --- a/.github/workflows/ci-opensearch.yml +++ b/.github/workflows/ci-opensearch.yml @@ -34,7 +34,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true @@ -58,7 +58,7 @@ jobs: uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: files: cover.out,cover-index-cleaner.out,cover-index-rollover.out verbose: true diff --git a/.github/workflows/ci-protogen-tests.yml b/.github/workflows/ci-protogen-tests.yml index 313a064c997..4a6fdad546b 100644 --- a/.github/workflows/ci-protogen-tests.yml +++ b/.github/workflows/ci-protogen-tests.yml @@ -24,7 +24,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: recursive diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index 3b1efe5d166..33cd2bd0855 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -8,13 +8,14 @@ on: workflow_dispatch: # See https://github.com/jaegertracing/jaeger/issues/4017 -# and https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions permissions: - deployments: write - contents: write + contents: read jobs: publish-release: + permissions: + contents: write + deployments: write if: github.repository == 'jaegertracing/jaeger' runs-on: ubuntu-latest @@ -33,7 +34,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: submodules: true @@ -111,7 +112,7 @@ jobs: QUAY_TOKEN: ${{ secrets.QUAY_TOKEN }} - name: Generate SBOM - uses: anchore/sbom-action@719133684c7d294116626d1344fe64f0d2ff3e9e # v0.15.2 + uses: anchore/sbom-action@ab5d7b5f48981941c4c5d6bf33aeb98fe3bae38c # v0.15.10 with: output-file: jaeger-SBOM.spdx.json upload-release-assets: false diff --git a/.github/workflows/ci-unit-tests-go-tip.yml b/.github/workflows/ci-unit-tests-go-tip.yml index 38fd316ba9e..e3bcadbb238 100644 --- a/.github/workflows/ci-unit-tests-go-tip.yml +++ b/.github/workflows/ci-unit-tests-go-tip.yml @@ -4,13 +4,13 @@ on: push: branches: [main] -# See https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions -permissions: # added using https://github.com/step-security/secure-workflows +permissions: contents: read - checks: write jobs: unit-tests-go-tip: + permissions: + checks: write runs-on: ubuntu-latest steps: - name: Harden Runner @@ -18,7 +18,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: Install Go Tip uses: ./.github/actions/setup-go-tip diff --git a/.github/workflows/ci-unit-tests.yml b/.github/workflows/ci-unit-tests.yml index 6e85579bcb9..aa9961533c9 100644 --- a/.github/workflows/ci-unit-tests.yml +++ b/.github/workflows/ci-unit-tests.yml @@ -11,13 +11,13 @@ concurrency: group: ${{ github.workflow }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} cancel-in-progress: true -# See https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions -permissions: # added using https://github.com/step-security/secure-workflows +permissions: contents: read - checks: write jobs: unit-tests: + permissions: + checks: write runs-on: ubuntu-latest steps: - name: Harden Runner @@ -25,7 +25,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: @@ -43,7 +43,7 @@ jobs: uses: ./.github/actions/setup-codecov - name: Upload coverage to codecov - uses: codecov/codecov-action@894ff025c7b54547a9a2a1e9f228beae737ad3c2 # v3.1.3 + uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71 # v4.2.0 with: file: cover.out verbose: true diff --git a/.github/workflows/ci-validation-of-shell-scripts.yml b/.github/workflows/ci-validation-of-shell-scripts.yml index 809b4785ad6..4ed566a344a 100644 --- a/.github/workflows/ci-validation-of-shell-scripts.yml +++ b/.github/workflows/ci-validation-of-shell-scripts.yml @@ -21,7 +21,7 @@ jobs: egress-policy: audit - name: check out code - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: Install shellcheck run: sudo apt-get install shellcheck diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index a113793c394..cedb3bd88eb 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -40,7 +40,7 @@ jobs: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index fc3a42dfaa0..97323a31371 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -22,6 +22,6 @@ jobs: egress-policy: audit - name: 'Checkout Repository' - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: 'Dependency Review' - uses: actions/dependency-review-action@4901385134134e04cec5fbe5ddfe3b2c5bd5d976 # v4.0.0 + uses: actions/dependency-review-action@5bbc3ba658137598168acb2ab73b21c432dd411b # v4.2.5 diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index 8e93b11bec0..cfb4a109af8 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -25,7 +25,7 @@ jobs: with: egress-policy: audit # TODO: change to 'egress-policy: block' after couple of runs - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 0e8532a7550..86b617092e1 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -37,7 +37,7 @@ jobs: egress-policy: audit - name: "Checkout code" - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: persist-credentials: false diff --git a/.gitignore b/.gitignore index a66dcb760cd..63cd11b81bd 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ cmd/collector/collector cmd/collector/collector-* cmd/ingester/ingester cmd/ingester/ingester-* +cmd/jaeger/internal/integration/results cmd/remote-storage/remote-storage cmd/remote-storage/remote-storage-* cmd/es-index-cleaner/es-index-cleaner-* diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ded593c4ba..6136e01b040 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -### A message to people of Russia 🇷🇺 +### 🇷🇺 A message to people of Russia If you currently live in Russia, please read [this message](./_To_People_of_Russia.md). @@ -15,12 +15,72 @@ next release (yyyy-mm-dd) run `make changelog` to generate content -### UI Changes +### 📊 UI Changes ... 
+1.56.0 (2024-04-02) +------------------- + +### Backend Changes + +#### ⛔ Breaking Changes + +* Fix hotrod instructions ([@yurishkuro](https://github.com/yurishkuro) in [#5273](https://github.com/jaegertracing/jaeger/pull/5273)) + +#### 🐞 Bug fixes, Minor Improvements + +* Refactor healthcheck signalling between server and service ([@WillSewell](https://github.com/WillSewell) in [#5308](https://github.com/jaegertracing/jaeger/pull/5308)) +* Docs: badger file permission as non-root service ([@tico88612](https://github.com/tico88612) in [#5282](https://github.com/jaegertracing/jaeger/pull/5282)) +* [kafka-consumer] add support for setting fetch message max bytes ([@sappusaketh](https://github.com/sappusaketh) in [#5283](https://github.com/jaegertracing/jaeger/pull/5283)) +* [chore] remove repetitive words ([@tgolang](https://github.com/tgolang) in [#5265](https://github.com/jaegertracing/jaeger/pull/5265)) +* Fix zipkin spanformat ([@fyuan1316](https://github.com/fyuan1316) in [#5261](https://github.com/jaegertracing/jaeger/pull/5261)) +* [kafka-producer] support setting max message size ([@sappusaketh](https://github.com/sappusaketh) in [#5263](https://github.com/jaegertracing/jaeger/pull/5263)) + +#### 🚧 Experimental Features + +* [jaeger-v2] add support for opensearch ([@akagami-harsh](https://github.com/akagami-harsh) in [#5257](https://github.com/jaegertracing/jaeger/pull/5257)) +* [jaeger-v2] add support for cassandra ([@Pushkarm029](https://github.com/Pushkarm029) in [#5253](https://github.com/jaegertracing/jaeger/pull/5253)) + +#### 👷 CI Improvements + +* Allow go-leak linter to fail ci ([@yurishkuro](https://github.com/yurishkuro) in [#5316](https://github.com/jaegertracing/jaeger/pull/5316)) +* [jaeger-v2] add grpc storage backend integration test ([@james-ryans](https://github.com/james-ryans) in [#5259](https://github.com/jaegertracing/jaeger/pull/5259)) +* Github actions added to block prs from fork/main branch ([@varshith257](https://github.com/varshith257) in [#5272](https://github.com/jaegertracing/jaeger/pull/5272)) + + +### 📊 UI Changes + +* UI pinned to version [1.40.0](https://github.com/jaegertracing/jaeger-ui/blob/main/CHANGELOG.md#v1400-2024-04-02). 
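Editor's note: the healthcheck entry above (#5308) corresponds to the `cmd/all-in-one/main.go` change later in this patch, where `queryApp.NewServer` now receives the shared health check via `svc.HC()` instead of exposing a `HealthCheckStatus()` channel that the caller had to drain in a goroutine. A minimal, self-contained sketch of that design change, using simplified stand-in types rather than the real `healthcheck` package API:

```go
package main

import "fmt"

// Status and HealthCheck are simplified stand-ins for Jaeger's healthcheck package.
type Status int

const (
	Unavailable Status = iota
	Ready
)

type HealthCheck struct{ status Status }

func (hc *HealthCheck) Set(s Status) { hc.status = s }
func (hc *HealthCheck) Get() Status  { return hc.status }

// Server accepts the shared health check at construction time and reports its
// own state directly, so callers no longer forward statuses from a channel.
type Server struct{ hc *HealthCheck }

func NewServer(hc *HealthCheck) *Server { return &Server{hc: hc} }

func (s *Server) Start() error {
	s.hc.Set(Ready)
	return nil
}

func main() {
	hc := &HealthCheck{}
	srv := NewServer(hc)
	_ = srv.Start()
	fmt.Println("ready:", hc.Get() == Ready)
}
```

The goroutine removed from `startQuery` in the all-in-one diff below is exactly the forwarding loop this pattern eliminates.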
+ + +1.55.0 (2024-03-04) +------------------- +### Backend Changes + +#### ✨ New Features: + +* Support uploading traces to UI in OpenTelemetry format (OTLP/JSON) ([@NavinShrinivas](https://github.com/NavinShrinivas) in [#5155](https://github.com/jaegertracing/jaeger/pull/5155)) +* Add Elasticsearch storage support for adaptive sampling ([@Pushkarm029](https://github.com/Pushkarm029) in [#5158](https://github.com/jaegertracing/jaeger/pull/5158)) + +#### 🐞 Bug fixes, Minor Improvements: + +* Add the `print-config` subcommand ([@gmafrac](https://github.com/gmafrac) in [#5200](https://github.com/jaegertracing/jaeger/pull/5200)) +* Return more detailed errors from ES storage ([@yurishkuro](https://github.com/yurishkuro) in [#5209](https://github.com/jaegertracing/jaeger/pull/5209)) +* Bump go version ([@yurishkuro](https://github.com/yurishkuro) in [#5180](https://github.com/jaegertracing/jaeger/pull/5180)) + +#### 🚧 Experimental Features: + +* [jaeger-v2] Add support for gRPC storarge ([@james-ryans](https://github.com/james-ryans) in [#5228](https://github.com/jaegertracing/jaeger/pull/5228)) +* [jaeger-v2] Add support for Elasticsearch ([@akagami-harsh](https://github.com/akagami-harsh) in [#5152](https://github.com/jaegertracing/jaeger/pull/5152)) + +### 📊 UI Changes + +* UI pinned to version [1.39.0](https://github.com/jaegertracing/jaeger-ui/blob/main/CHANGELOG.md#v1390-2024-03-04). + + 1.54.0 (2024-02-06) ------------------- diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7d925c306d1..b41f752b29e 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,5 @@ ## Community Code of Conduct Jaeger follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). + +Please contact the [Jaeger Maintainers](mailto:cncf-jaeger-maintainers@lists.cncf.io) or the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 1f046b0d9e3..2a1ef599bbe 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -5,7 +5,7 @@ This document defines governance policies for the Jaeger project. ## Maintainers Jaeger Maintainers have write access to the Jaeger GitHub repository https://github.com/jaegertracing/jaeger. -They can merge their own patches or patches from others. The current maintainers can be found in [CODEOWNERS](./CODEOWNERS). +They can merge their own patches or patches from others. The current maintainers can be found in [MAINTAINERS](./MAINTAINERS.md). This privilege is granted with some expectation of responsibility: maintainers are people who care about the Jaeger project and want to help it grow and improve. A maintainer is not just someone who can make changes, but someone who has demonstrated his or her ability to collaborate with the team, get the most knowledgeable people to review code, contribute high-quality code, and follow through to fix issues (in code or tests). 
diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 00000000000..fcc48358467 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,13 @@ +The current Maintainers Group for the Jaeger Project consists of: + +| Name | Employer | Responsibilities | +| ---- | -------- | ---------------- | +| [@albertteoh](https://github.com/albertteoh) | PackSmith | ALL | +| [@jkowall](https://github.com/jkowall) | Aiven | ALL | +| [@joe-elliott](https://github.com/joe-elliott) | Grafana Labs | ALL | +| [@pavolloffay](https://github.com/pavolloffay) | RedHat | ALL | +| [@yurishkuro](https://github.com/yurishkuro) | Meta | ALL | + +This list must be kept in sync with the [CNCF Project Maintainers list](https://github.com/cncf/foundation/blob/master/project-maintainers.csv). + +See [the project Governance](./GOVERNANCE.md) for how maintainers are selected and replaced. diff --git a/Makefile b/Makefile index 40d8905848e..7cf87a2770f 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,7 @@ SHELL := /bin/bash JAEGER_IMPORT_PATH = github.com/jaegertracing/jaeger STORAGE_PKGS = ./plugin/storage/integration/... +JAEGER_V2_STORAGE_PKGS = ./cmd/jaeger/internal/integration # These DOCKER_xxx vars are used when building Docker images. DOCKER_NAMESPACE?=jaegertracing @@ -56,6 +57,7 @@ GOARCH ?= $(shell $(GO) env GOARCH) GOBUILD=CGO_ENABLED=0 installsuffix=cgo $(GO) build -trimpath GOTEST_QUIET=$(GO) test $(RACE) GOTEST=$(GOTEST_QUIET) -v +COVEROUT=cover.out GOFMT=gofmt GOFUMPT=gofumpt FMT_LOG=.fmt.log @@ -111,39 +113,48 @@ test: all-in-one-integration-test: TEST_MODE=integration $(GOTEST) ./cmd/all-in-one/ +# General integration tests for jaeger-v2 storage backends; +# these tests are placed at `./cmd/jaeger/internal/integration/*_test.go`. +# The integration tests are filtered by the STORAGE env variable; +# currently the available STORAGE values are: +# - grpc +.PHONY: jaeger-v2-storage-integration-test +jaeger-v2-storage-integration-test: + (cd cmd/jaeger/ && go build .) + # Expire test results for jaeger storage integration tests since the environment might change + # even though the code remains the same. + go clean -testcache + bash -c "set -e; set -o pipefail; $(GOTEST) -coverpkg=./... -coverprofile $(COVEROUT) $(JAEGER_V2_STORAGE_PKGS) $(COLORIZE)" + .PHONY: storage-integration-test storage-integration-test: # Expire tests results for storage integration tests since the environment might change # even though the code remains the same. go clean -testcache - bash -c "set -e; set -o pipefail; $(GOTEST) -coverpkg=./... -coverprofile cover.out $(STORAGE_PKGS) $(COLORIZE)" + bash -c "set -e; set -o pipefail; $(GOTEST) -coverpkg=./... -coverprofile $(COVEROUT) $(STORAGE_PKGS) $(COLORIZE)" .PHONY: badger-storage-integration-test badger-storage-integration-test: - bash -c "set -e; set -o pipefail; $(GOTEST) -tags=badger_storage_integration -coverpkg=./... -coverprofile cover-badger.out $(STORAGE_PKGS) $(COLORIZE)" + STORAGE=badger $(MAKE) storage-integration-test .PHONY: grpc-storage-integration-test grpc-storage-integration-test: (cd examples/memstore-plugin/ && go build .) - bash -c "set -e; set -o pipefail; $(GOTEST) -tags=grpc_storage_integration -coverpkg=./... 
-coverprofile cover.out $(STORAGE_PKGS) $(COLORIZE)" + STORAGE=grpc $(MAKE) storage-integration-test +# this test assumes STORAGE environment variable is set to elasticsearch|opensearch .PHONY: index-cleaner-integration-test index-cleaner-integration-test: docker-images-elastic - # Expire test results for storage integration tests since the environment might change - # even though the code remains the same. - go clean -testcache - bash -c "set -e; set -o pipefail; $(GOTEST) -tags index_cleaner -coverpkg=./... -coverprofile cover-index-cleaner.out $(STORAGE_PKGS) $(COLORIZE)" + $(MAKE) storage-integration-test COVEROUT=cover-index-cleaner.out +# this test assumes STORAGE environment variable is set to elasticsearch|opensearch .PHONY: index-rollover-integration-test index-rollover-integration-test: docker-images-elastic - # Expire test results for storage integration tests since the environment might change - # even though the code remains the same. - go clean -testcache - bash -c "set -e; set -o pipefail; $(GOTEST) -tags index_rollover -coverpkg=./... -coverprofile cover-index-rollover.out $(STORAGE_PKGS) $(COLORIZE)" + $(MAKE) storage-integration-test COVEROUT=cover-index-rollover.out .PHONY: cover cover: nocover - bash -c "set -e; set -o pipefail; $(GOTEST) -tags=memory_storage_integration -timeout 5m -coverprofile cover.out ./... | tee test-results.json" + bash -c "set -e; set -o pipefail; STORAGE=memory $(GOTEST) -timeout 5m -coverprofile $(COVEROUT) ./... | tee test-results.json" go tool cover -html=cover.out -o cover.html .PHONY: nocover diff --git a/README.md b/README.md index 1d92b305ca4..a306942b4dc 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,11 @@ [![Project+Community stats][community-badge]][community-stats] [![Unit Tests][ci-img]][ci] [![Coverage Status][cov-img]][cov] -[![FOSSA Status][fossa-img]](https://app.fossa.io/projects/git%2Bgithub.com%2Fjaegertracing%2Fjaeger?ref=badge_shield) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/jaegertracing/jaeger/badge)](https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger) +[![FOSSA Status][fossa-img]][fossa] +[![OpenSSF Scorecard][openssf-img]][openssf] +[![OpenSSF Best Practices][openssf-bp-img]][openssf-bp] +[![CLOMonitor][clomonitor-img]][clomonitor] +[![Artifact Hub][artifacthub-img]][artifacthub] @@ -240,6 +243,17 @@ Copyright (c) The Jaeger Authors. [Apache 2.0 License](./LICENSE). 
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger/branch/main/graph/badge.svg [cov]: https://codecov.io/gh/jaegertracing/jaeger/branch/main/ [fossa-img]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fjaegertracing%2Fjaeger.svg?type=shield +[fossa]: https://app.fossa.io/projects/git%2Bgithub.com%2Fjaegertracing%2Fjaeger?ref=badge_shield +[openssf-img]: https://api.securityscorecards.dev/projects/github.com/jaegertracing/jaeger/badge +[openssf]: https://securityscorecards.dev/viewer/?uri=github.com/jaegertracing/jaeger +[openssf-bp-img]: https://www.bestpractices.dev/projects/1273/badge +[openssf-bp]: https://www.bestpractices.dev/projects/1273 +[clomonitor-img]: https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/jaeger/badge +[clomonitor]: https://clomonitor.io/projects/cncf/jaeger +[artifacthub-img]: https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/jaegertracing +[artifacthub]: https://artifacthub.io/packages/search?repo=jaegertracing + + [dapper]: https://research.google.com/pubs/pub36356.html [ubeross]: https://uber.github.io [community-badge]: https://img.shields.io/badge/Project+Community-stats-blue.svg diff --git a/RELEASE.md b/RELEASE.md index d0011f699eb..84ca3d25d47 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -7,9 +7,11 @@ * The section can be split into sub-section if necessary, e.g. UI Changes, Backend Changes, Bug Fixes, etc. * If the jaeger-ui submodule has changes cut a new release and also upgrade the submodule versions then commit, for example: ``` + git submodule init + git submodule update cd jaeger-ui - git ls-remote --tags origin - git fetch + git checkout main + git pull git checkout {new_version} //e.g. v1.5.0 ``` * If there are no changes, indicate this with "No changes" ([example](https://github.com/jaegertracing/jaeger/pull/4131/files)). @@ -21,6 +23,11 @@ * Title "Release X.Y.Z" * Tag `vX.Y.Z` (note the `v` prefix) and choose appropriate branch * Copy the new CHANGELOG.md section into the release notes + * Extra: GitHub has a button "generate release notes". Those are not formatted as we want, + but it has a nice feature of explicitly listing first-time contributors. + Before doing the previous step, you can click that button and then remove everything + except the New Contributors section. Change the header to `### 👏 New Contributors`, + then copy the main changelog above it. [Example](https://github.com/jaegertracing/jaeger/releases/tag/v1.55.0). 3. The release tag will trigger a build of the docker images. Since forks don't have jaegertracingbot dockerhub token, they can never publish images to jaegertracing organisation. 1. Check the images are available on [Docker Hub](https://hub.docker.com/r/jaegertracing/). 2. For monitoring and troubleshooting, refer to the [jaegertracing/jaeger GithubActions tab](https://github.com/jaegertracing/jaeger/actions). 
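Editor's note: the new `jaeger-v2-storage-integration-test` target in the Makefile above is driven by the STORAGE environment variable (the Badger and gRPC workflows export `STORAGE=badger` / `STORAGE=grpc` before invoking it, and the Makefile comment lists the supported values). A rough sketch of how a test under `./cmd/jaeger/internal/integration` might gate itself on that variable; the test name, skip message, and body are illustrative assumptions, not code from the repository:

```go
package integration

import (
	"os"
	"testing"
)

// TestGRPCStorage illustrates STORAGE-based filtering: CI and the Makefile
// export STORAGE, and each backend's test skips unless it matches.
func TestGRPCStorage(t *testing.T) {
	if storage := os.Getenv("STORAGE"); storage != "grpc" {
		t.Skipf("skipping gRPC storage integration test; STORAGE=%q", storage)
	}
	// ... start the jaeger-v2 binary built from cmd/jaeger and run the shared
	// storage integration suite against the gRPC backend ...
}
```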
@@ -53,8 +60,8 @@ Here are the release managers for future versions with the tentative release dat | Version | Release Manager | Tentative release date | |---------|-----------------|------------------------| -| 1.55.0 | @jkowall | 6 March 2024 | -| 1.56.0 | @yurishkuro | 3 April 2024 | | 1.57.0 | @albertteoh | 1 May 2024 | | 1.58.0 | @pavolloffay | 5 June 2024 | | 1.59.0 | @joe-elliott | 3 July 2024 | +| 1.60.0 | @jkowall | 7 August 2024 | +| 1.61.0 | @yurishkuro | 3 Sep 2024 | diff --git a/cmd/agent/app/configmanager/grpc/manager.go b/cmd/agent/app/configmanager/grpc/manager.go index 8361a77f6f7..f941a178e6c 100644 --- a/cmd/agent/app/configmanager/grpc/manager.go +++ b/cmd/agent/app/configmanager/grpc/manager.go @@ -17,6 +17,7 @@ package grpc import ( "context" "errors" + "fmt" "google.golang.org/grpc" @@ -38,7 +39,11 @@ func NewConfigManager(conn *grpc.ClientConn) *ConfigManagerProxy { // GetSamplingStrategy returns sampling strategies from collector. func (s *ConfigManagerProxy) GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) { - return s.client.GetSamplingStrategy(ctx, &api_v2.SamplingStrategyParameters{ServiceName: serviceName}) + resp, err := s.client.GetSamplingStrategy(ctx, &api_v2.SamplingStrategyParameters{ServiceName: serviceName}) + if err != nil { + return nil, fmt.Errorf("failed to get sampling strategy: %w", err) + } + return resp, nil } // GetBaggageRestrictions returns baggage restrictions from collector. diff --git a/cmd/agent/app/configmanager/grpc/manager_test.go b/cmd/agent/app/configmanager/grpc/manager_test.go index e1eee2ded8f..dd14a81f70e 100644 --- a/cmd/agent/app/configmanager/grpc/manager_test.go +++ b/cmd/agent/app/configmanager/grpc/manager_test.go @@ -37,7 +37,7 @@ func TestSamplingManager_GetSamplingStrategy(t *testing.T) { s, addr := initializeGRPCTestServer(t, func(s *grpc.Server) { api_v2.RegisterSamplingManagerServer(s, &mockSamplingHandler{}) }) - conn, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) defer close(t, conn) require.NoError(t, err) defer s.GracefulStop() @@ -48,14 +48,14 @@ func TestSamplingManager_GetSamplingStrategy(t *testing.T) { } func TestSamplingManager_GetSamplingStrategy_error(t *testing.T) { - conn, err := grpc.Dial("foo", grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient("foo", grpc.WithTransportCredentials(insecure.NewCredentials())) defer close(t, conn) require.NoError(t, err) manager := NewConfigManager(conn) resp, err := manager.GetSamplingStrategy(context.Background(), "any") require.Nil(t, resp) require.Error(t, err) - assert.Contains(t, err.Error(), "Error while dialing: dial tcp: address foo: missing port in address") + assert.Contains(t, err.Error(), "failed to get sampling strategy") } func TestSamplingManager_GetBaggageRestrictions(t *testing.T) { diff --git a/cmd/agent/app/processors/thrift_processor_test.go b/cmd/agent/app/processors/thrift_processor_test.go index 97ea396fdac..f3315f26430 100644 --- a/cmd/agent/app/processors/thrift_processor_test.go +++ b/cmd/agent/app/processors/thrift_processor_test.go @@ -81,7 +81,7 @@ func createProcessor(t *testing.T, mFactory metrics.Factory, tFactory thrift.TPr func initCollectorAndReporter(t *testing.T) (*metricstest.Factory, *testutils.GrpcCollector, reporter.Reporter, *grpc.ClientConn) { grpcCollector := testutils.StartGRPCCollector(t) - 
conn, err := grpc.Dial(grpcCollector.Listener().Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcCollector.Listener().Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) rep := grpcrep.NewReporter(conn, map[string]string{}, zaptest.NewLogger(t)) metricsFactory := metricstest.NewFactory(0) diff --git a/cmd/agent/app/reporter/grpc/builder.go b/cmd/agent/app/reporter/grpc/builder.go index 0ec32a5d2e3..5853ef7912b 100644 --- a/cmd/agent/app/reporter/grpc/builder.go +++ b/cmd/agent/app/reporter/grpc/builder.go @@ -102,6 +102,7 @@ func (b *ConnBuilder) CreateConnection(ctx context.Context, logger *zap.Logger, dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(grpc_retry.WithMax(b.MaxRetry)))) dialOptions = append(dialOptions, b.AdditionalDialOptions...) + // TODO: Need to replace grpc.Dial with grpc.NewClient and pass test conn, err := grpc.Dial(dialTarget, dialOptions...) if err != nil { return nil, err diff --git a/cmd/agent/app/reporter/grpc/reporter_test.go b/cmd/agent/app/reporter/grpc/reporter_test.go index c15ed85c4d8..7d901134784 100644 --- a/cmd/agent/app/reporter/grpc/reporter_test.go +++ b/cmd/agent/app/reporter/grpc/reporter_test.go @@ -59,7 +59,7 @@ func TestReporter_EmitZipkinBatch(t *testing.T) { api_v2.RegisterCollectorServiceServer(s, handler) }) defer s.Stop() - conn, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) //nolint:staticcheck // don't care about errors require.NoError(t, err) defer conn.Close() @@ -102,7 +102,7 @@ func TestReporter_EmitBatch(t *testing.T) { api_v2.RegisterCollectorServiceServer(s, handler) }) defer s.Stop() - conn, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) //nolint:staticcheck // don't care about errors require.NoError(t, err) defer conn.Close() @@ -131,7 +131,7 @@ func TestReporter_EmitBatch(t *testing.T) { } func TestReporter_SendFailure(t *testing.T) { - conn, err := grpc.Dial("invalid-host-name-blah:12345", grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient("invalid-host-name-blah:12345", grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) defer conn.Close() rep := NewReporter(conn, nil, zap.NewNop()) @@ -207,7 +207,7 @@ func TestReporter_MultitenantEmitBatch(t *testing.T) { api_v2.RegisterCollectorServiceServer(s, handler) }) defer s.Stop() - conn, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) defer func() { require.NoError(t, conn.Close()) }() rep := NewReporter(conn, nil, zap.NewNop()) diff --git a/cmd/all-in-one/all_in_one_test.go b/cmd/all-in-one/all_in_one_test.go index b557dfcff22..f5186001976 100644 --- a/cmd/all-in-one/all_in_one_test.go +++ b/cmd/all-in-one/all_in_one_test.go @@ -21,6 +21,7 @@ import ( "io" "net/http" "os" + "regexp" "strings" "testing" "time" @@ -85,22 +86,29 @@ func healthCheck(t *testing.T) { } func checkWebUI(t *testing.T) { - t.Run("logo", func(t *testing.T) { - resp, err := http.Get(queryAddr + "/static/jaeger-logo-jWbKFHZJ.svg") + resp, err := 
http.Get(queryAddr + "/") + require.NoError(t, err) + require.NotNil(t, resp) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) + bodyBytes, err := io.ReadAll(resp.Body) + require.NoError(t, err) + body := string(bodyBytes) + t.Run("Static_files", func(t *testing.T) { + pattern := regexp.MustCompile(``) + t.Run("React_app", func(t *testing.T) { + assert.Contains(t, body, `
`) }) } diff --git a/cmd/all-in-one/main.go b/cmd/all-in-one/main.go index 721757a6079..08ac4313dff 100644 --- a/cmd/all-in-one/main.go +++ b/cmd/all-in-one/main.go @@ -283,15 +283,10 @@ func startQuery( ) *queryApp.Server { spanReader = storageMetrics.NewReadMetricsDecorator(spanReader, metricsFactory) qs := querysvc.NewQueryService(spanReader, depReader, *queryOpts) - server, err := queryApp.NewServer(svc.Logger, qs, metricsQueryService, qOpts, tm, jt) + server, err := queryApp.NewServer(svc.Logger, svc.HC(), qs, metricsQueryService, qOpts, tm, jt) if err != nil { svc.Logger.Fatal("Could not create jaeger-query", zap.Error(err)) } - go func() { - for s := range server.HealthCheckStatus() { - svc.SetHealthCheckStatus(s) - } - }() if err := server.Start(); err != nil { svc.Logger.Fatal("Could not start jaeger-query", zap.Error(err)) } diff --git a/cmd/anonymizer/app/anonymizer/anonymizer.go b/cmd/anonymizer/app/anonymizer/anonymizer.go index a60d4c56f52..b3a893c702d 100644 --- a/cmd/anonymizer/app/anonymizer/anonymizer.go +++ b/cmd/anonymizer/app/anonymizer/anonymizer.go @@ -15,6 +15,7 @@ package anonymizer import ( + "context" "encoding/json" "fmt" "hash/fnv" @@ -57,6 +58,8 @@ type Anonymizer struct { lock sync.Mutex mapping mapping options Options + cancel context.CancelFunc + wg sync.WaitGroup } // Options represents the various options with which the anonymizer can be configured. @@ -70,6 +73,7 @@ type Options struct { // New creates new Anonymizer. The mappingFile stores the mapping from original to // obfuscated strings, in case later investigations require looking at the original traces. func New(mappingFile string, options Options, logger *zap.Logger) *Anonymizer { + ctx, cancel := context.WithCancel(context.Background()) a := &Anonymizer{ mappingFile: mappingFile, logger: logger, @@ -78,6 +82,7 @@ func New(mappingFile string, options Options, logger *zap.Logger) *Anonymizer { Operations: make(map[string]string), }, options: options, + cancel: cancel, } if _, err := os.Stat(filepath.Clean(mappingFile)); err == nil { dat, err := os.ReadFile(filepath.Clean(mappingFile)) @@ -88,14 +93,28 @@ func New(mappingFile string, options Options, logger *zap.Logger) *Anonymizer { logger.Fatal("Cannot unmarshal previous mapping", zap.Error(err)) } } + a.wg.Add(1) go func() { - for range time.NewTicker(10 * time.Second).C { - a.SaveMapping() + defer a.wg.Done() + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + a.SaveMapping() + case <-ctx.Done(): + return + } } }() return a } +func (a *Anonymizer) Stop() { + a.cancel() + a.wg.Wait() +} + // SaveMapping writes the mapping from original to obfuscated strings to a file. // It is called by the anonymizer itself periodically, and should be called at // the end of the extraction run. 
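Editor's note: many of the test changes above replace `grpc.Dial` with `grpc.NewClient`, the constructor grpc-go introduced (v1.63) as the recommended replacement for `Dial`. `NewClient` never connects eagerly: the client stays idle until the first RPC (or an explicit `Connect()`), so bad targets surface as RPC errors rather than construction errors; combined with the new error wrapping in `configmanager/grpc/manager.go`, that is why `manager_test.go` now asserts on "failed to get sampling strategy" instead of the raw dial error. A small sketch of the behavior (the endpoint is just a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Construction succeeds without contacting the server.
	conn, err := grpc.NewClient("localhost:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	fmt.Println(conn.GetState()) // IDLE: nothing has been dialed yet
	conn.Connect()               // optionally start connecting without issuing an RPC
}
```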
diff --git a/cmd/anonymizer/app/anonymizer/anonymizer_test.go b/cmd/anonymizer/app/anonymizer/anonymizer_test.go index bce80da7ecf..b6690745e73 100644 --- a/cmd/anonymizer/app/anonymizer/anonymizer_test.go +++ b/cmd/anonymizer/app/anonymizer/anonymizer_test.go @@ -83,6 +83,7 @@ func TestNew(t *testing.T) { file, err := os.CreateTemp(tempDir, "mapping.json") require.NoError(t, err) + defer file.Close() _, err = file.Write([]byte(` { @@ -97,6 +98,7 @@ func TestNew(t *testing.T) { require.NoError(t, err) anonymizer := New(file.Name(), Options{}, nopLogger) + defer anonymizer.Stop() assert.NotNil(t, anonymizer) } diff --git a/cmd/anonymizer/app/query/query.go b/cmd/anonymizer/app/query/query.go index e928ceeb043..e8465d27033 100644 --- a/cmd/anonymizer/app/query/query.go +++ b/cmd/anonymizer/app/query/query.go @@ -19,7 +19,6 @@ import ( "errors" "fmt" "io" - "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -38,10 +37,7 @@ type Query struct { // New creates a Query object func New(addr string) (*Query, error) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - conn, err := grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, fmt.Errorf("failed to connect with the jaeger-query service: %w", err) } diff --git a/cmd/anonymizer/app/writer/package_test.go b/cmd/anonymizer/app/writer/package_test.go new file mode 100644 index 00000000000..a77bb0fefe7 --- /dev/null +++ b/cmd/anonymizer/app/writer/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package writer + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/anonymizer/app/writer/writer.go b/cmd/anonymizer/app/writer/writer.go index 5b22a371c4b..485f16533b9 100644 --- a/cmd/anonymizer/app/writer/writer.go +++ b/cmd/anonymizer/app/writer/writer.go @@ -145,5 +145,6 @@ func (w *Writer) Close() { w.capturedFile.Close() w.anonymizedFile.WriteString("\n]\n") w.anonymizedFile.Close() + w.anonymizer.Stop() w.anonymizer.SaveMapping() } diff --git a/cmd/anonymizer/app/writer/writer_test.go b/cmd/anonymizer/app/writer/writer_test.go index 73391682e30..c6e11434ccc 100644 --- a/cmd/anonymizer/app/writer/writer_test.go +++ b/cmd/anonymizer/app/writer/writer_test.go @@ -53,8 +53,9 @@ func TestNew(t *testing.T) { AnonymizedFile: tempDir + "/anonymized.json", MappingFile: tempDir + "/mapping.json", } - _, err := New(config, nopLogger) + writer, err := New(config, nopLogger) require.NoError(t, err) + defer writer.Close() }) t.Run("CapturedFile does not exist", func(t *testing.T) { diff --git a/cmd/collector/app/collector.go b/cmd/collector/app/collector.go index 34f543d0dee..b8cfdc3b3cb 100644 --- a/cmd/collector/app/collector.go +++ b/cmd/collector/app/collector.go @@ -218,9 +218,15 @@ func (c *Collector) Close() error { } // watchers actually never return errors from Close - _ = c.tlsGRPCCertWatcherCloser.Close() - _ = c.tlsHTTPCertWatcherCloser.Close() - _ = c.tlsZipkinCertWatcherCloser.Close() + if c.tlsGRPCCertWatcherCloser != nil { + _ = c.tlsGRPCCertWatcherCloser.Close() + } + if c.tlsHTTPCertWatcherCloser != nil { + _ = c.tlsHTTPCertWatcherCloser.Close() + } + if c.tlsZipkinCertWatcherCloser != nil { + _ = c.tlsZipkinCertWatcherCloser.Close() + } return 
nil } diff --git a/cmd/collector/app/collector_test.go b/cmd/collector/app/collector_test.go index fbaad803e75..8c92cd81e53 100644 --- a/cmd/collector/app/collector_test.go +++ b/cmd/collector/app/collector_test.go @@ -52,6 +52,7 @@ func TestNewCollector(t *testing.T) { hc := healthcheck.New() logger := zap.NewNop() baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() spanWriter := &fakeSpanWriter{} strategyStore := &mockStrategyStore{} tm := &tenancy.Manager{} @@ -78,6 +79,7 @@ func TestCollector_StartErrors(t *testing.T) { hc := healthcheck.New() logger := zap.NewNop() baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() spanWriter := &fakeSpanWriter{} strategyStore := &mockStrategyStore{} tm := &tenancy.Manager{} @@ -94,6 +96,7 @@ func TestCollector_StartErrors(t *testing.T) { err := c.Start(options) require.Error(t, err) assert.Contains(t, err.Error(), expErr) + require.NoError(t, c.Close()) }) } @@ -126,12 +129,18 @@ func (m *mockStrategyStore) GetSamplingStrategy(_ context.Context, serviceName s return &api_v2.SamplingStrategyResponse{}, nil } +func (m *mockStrategyStore) Close() error { + return nil +} + func TestCollector_PublishOpts(t *testing.T) { // prepare hc := healthcheck.New() logger := zap.NewNop() baseMetrics := metricstest.NewFactory(time.Second) + defer baseMetrics.Backend.Stop() forkFactory := metricstest.NewFactory(time.Second) + defer forkFactory.Backend.Stop() metricsFactory := fork.New("internal", forkFactory, baseMetrics) spanWriter := &fakeSpanWriter{} strategyStore := &mockStrategyStore{} @@ -168,6 +177,7 @@ func TestAggregator(t *testing.T) { hc := healthcheck.New() logger := zap.NewNop() baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() spanWriter := &fakeSpanWriter{} strategyStore := &mockStrategyStore{} agg := &mockAggregator{} diff --git a/cmd/collector/app/handler/grpc_handler_test.go b/cmd/collector/app/handler/grpc_handler_test.go index b8a9ae242d1..ef4f53ee081 100644 --- a/cmd/collector/app/handler/grpc_handler_test.go +++ b/cmd/collector/app/handler/grpc_handler_test.go @@ -108,7 +108,7 @@ func initializeGRPCTestServer(t *testing.T, beforeServe func(s *grpc.Server)) (* } func newClient(t *testing.T, addr net.Addr) (api_v2.CollectorServiceClient, *grpc.ClientConn) { - conn, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) return api_v2.NewCollectorServiceClient(conn), conn } diff --git a/cmd/collector/app/handler/otlp_receiver.go b/cmd/collector/app/handler/otlp_receiver.go index b17acb6f61a..84177b69592 100644 --- a/cmd/collector/app/handler/otlp_receiver.go +++ b/cmd/collector/app/handler/otlp_receiver.go @@ -106,7 +106,7 @@ func startOTLPReceiver( return otlpReceiver, nil } -func applyGRPCSettings(cfg *configgrpc.GRPCServerSettings, opts *flags.GRPCOptions) { +func applyGRPCSettings(cfg *configgrpc.ServerConfig, opts *flags.GRPCOptions) { if opts.HostPort != "" { cfg.NetAddr.Endpoint = opts.HostPort } @@ -126,7 +126,7 @@ func applyGRPCSettings(cfg *configgrpc.GRPCServerSettings, opts *flags.GRPCOptio } } -func applyHTTPSettings(cfg *confighttp.HTTPServerSettings, opts *flags.HTTPOptions) { +func applyHTTPSettings(cfg *confighttp.ServerConfig, opts *flags.HTTPOptions) { if opts.HostPort != "" { cfg.Endpoint = opts.HostPort } @@ -134,15 +134,15 @@ func applyHTTPSettings(cfg 
*confighttp.HTTPServerSettings, opts *flags.HTTPOptio cfg.TLSSetting = applyTLSSettings(&opts.TLS) } - cfg.CORS = &confighttp.CORSSettings{ + cfg.CORS = &confighttp.CORSConfig{ AllowedOrigins: opts.CORS.AllowedOrigins, AllowedHeaders: opts.CORS.AllowedHeaders, } } -func applyTLSSettings(opts *tlscfg.Options) *configtls.TLSServerSetting { - return &configtls.TLSServerSetting{ - TLSSetting: configtls.TLSSetting{ +func applyTLSSettings(opts *tlscfg.Options) *configtls.ServerConfig { + return &configtls.ServerConfig{ + Config: configtls.Config{ CAFile: opts.CAPath, CertFile: opts.CertPath, KeyFile: opts.KeyPath, diff --git a/cmd/collector/app/handler/zipkin_receiver.go b/cmd/collector/app/handler/zipkin_receiver.go index f2cbff9160e..a477c970bf1 100644 --- a/cmd/collector/app/handler/zipkin_receiver.go +++ b/cmd/collector/app/handler/zipkin_receiver.go @@ -69,6 +69,9 @@ func startZipkinReceiver( } consumerAdapter := newConsumerDelegate(logger, spanProcessor, tm) + // reset Zipkin spanFormat + consumerAdapter.batchConsumer.spanOptions.SpanFormat = processor.ZipkinSpanFormat + nextConsumer, err := newTraces(consumerAdapter.consume) if err != nil { return nil, fmt.Errorf("could not create Zipkin consumer: %w", err) diff --git a/cmd/collector/app/handler/zipkin_receiver_test.go b/cmd/collector/app/handler/zipkin_receiver_test.go index 8d783588de6..54abedd1964 100644 --- a/cmd/collector/app/handler/zipkin_receiver_test.go +++ b/cmd/collector/app/handler/zipkin_receiver_test.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/collector/receiver" "github.com/jaegertracing/jaeger/cmd/collector/app/flags" + "github.com/jaegertracing/jaeger/cmd/collector/app/processor" zipkinthrift "github.com/jaegertracing/jaeger/model/converter/thrift/zipkin" "github.com/jaegertracing/jaeger/pkg/tenancy" "github.com/jaegertracing/jaeger/pkg/testutils" @@ -126,6 +127,7 @@ func TestZipkinReceiver(t *testing.T) { t.Logf("response: %s %s", response.Status, string(bodyBytes)) } require.NoError(t, response.Body.Close()) + require.Equal(t, processor.ZipkinSpanFormat, spanProcessor.getSpanFormat()) }) } } diff --git a/cmd/collector/app/metrics_test.go b/cmd/collector/app/metrics_test.go index 06c644b8a76..8c0990f76d9 100644 --- a/cmd/collector/app/metrics_test.go +++ b/cmd/collector/app/metrics_test.go @@ -29,6 +29,7 @@ import ( func TestProcessorMetrics(t *testing.T) { baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() serviceMetrics := baseMetrics.Namespace(jaegerM.NSOptions{Name: "service", Tags: nil}) hostMetrics := baseMetrics.Namespace(jaegerM.NSOptions{Name: "host", Tags: nil}) spm := NewSpanProcessorMetrics(serviceMetrics, hostMetrics, []processor.SpanFormat{processor.SpanFormat("scruffy")}) @@ -63,6 +64,7 @@ func TestProcessorMetrics(t *testing.T) { func TestNewTraceCountsBySvc(t *testing.T) { baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() metrics := newTraceCountsBySvc(baseMetrics, "not_on_my_level", 3) metrics.countByServiceName("fry", false, model.SamplerTypeUnrecognized) @@ -95,6 +97,7 @@ func TestNewTraceCountsBySvc(t *testing.T) { func TestNewSpanCountsBySvc(t *testing.T) { baseMetrics := metricstest.NewFactory(time.Hour) + defer baseMetrics.Backend.Stop() metrics := newSpanCountsBySvc(baseMetrics, "not_on_my_level", 3) metrics.countByServiceName("fry", false) metrics.countByServiceName("leela", false) diff --git a/cmd/collector/app/package_test.go b/cmd/collector/app/package_test.go new file mode 100644 index 00000000000..5946e183ad1 --- 
/dev/null +++ b/cmd/collector/app/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package app + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/collector/app/sampling/grpc_handler_test.go b/cmd/collector/app/sampling/grpc_handler_test.go index 3a6590bf1ad..439e3bef646 100644 --- a/cmd/collector/app/sampling/grpc_handler_test.go +++ b/cmd/collector/app/sampling/grpc_handler_test.go @@ -37,6 +37,10 @@ func (s mockSamplingStore) GetSamplingStrategy(ctx context.Context, serviceName return &api_v2.SamplingStrategyResponse{StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC}, nil } +func (s mockSamplingStore) Close() error { + return nil +} + func TestNewGRPCHandler(t *testing.T) { tests := []struct { req *api_v2.SamplingStrategyParameters diff --git a/cmd/collector/app/sampling/strategystore/interface.go b/cmd/collector/app/sampling/strategystore/interface.go index 8ee99491110..90d9464918d 100644 --- a/cmd/collector/app/sampling/strategystore/interface.go +++ b/cmd/collector/app/sampling/strategystore/interface.go @@ -24,6 +24,9 @@ import ( // StrategyStore keeps track of service specific sampling strategies. type StrategyStore interface { + // Close() from io.Closer stops the processor from calculating probabilities. + io.Closer + // GetSamplingStrategy retrieves the sampling strategy for the specified service. GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) } diff --git a/cmd/collector/app/server/grpc_test.go b/cmd/collector/app/server/grpc_test.go index ac58626577f..647445a2b5b 100644 --- a/cmd/collector/app/server/grpc_test.go +++ b/cmd/collector/app/server/grpc_test.go @@ -84,7 +84,7 @@ func TestSpanCollector(t *testing.T) { require.NoError(t, err) defer server.Stop() - conn, err := grpc.Dial( + conn, err := grpc.NewClient( params.HostPortActual, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) diff --git a/cmd/collector/app/server/test.go b/cmd/collector/app/server/test.go index 4f78e75866c..b670934a785 100644 --- a/cmd/collector/app/server/test.go +++ b/cmd/collector/app/server/test.go @@ -28,6 +28,10 @@ func (s mockSamplingStore) GetSamplingStrategy(_ context.Context, serviceName st return nil, nil } +func (s mockSamplingStore) Close() error { + return nil +} + type mockSpanProcessor struct{} func (p *mockSpanProcessor) Close() error { diff --git a/cmd/collector/app/span_handler_builder_test.go b/cmd/collector/app/span_handler_builder_test.go index 3dbbeb8fced..93b994bd744 100644 --- a/cmd/collector/app/span_handler_builder_test.go +++ b/cmd/collector/app/span_handler_builder_test.go @@ -61,6 +61,7 @@ func TestNewSpanHandlerBuilder(t *testing.T) { assert.NotNil(t, spanHandlers.JaegerBatchesHandler) assert.NotNil(t, spanHandlers.GRPCHandler) assert.NotNil(t, spanProcessor) + require.NoError(t, spanProcessor.Close()) } func TestDefaultSpanFilter(t *testing.T) { diff --git a/cmd/collector/app/span_processor_test.go b/cmd/collector/app/span_processor_test.go index 1334b08a953..9d7b5f21bb9 100644 --- a/cmd/collector/app/span_processor_test.go +++ b/cmd/collector/app/span_processor_test.go @@ -83,6 +83,7 @@ func TestBySvcMetrics(t *testing.T) { for _, test := range tests { mb := metricstest.NewFactory(time.Hour) + defer mb.Backend.Stop() logger := zap.NewNop() serviceMetrics := mb.Namespace(metrics.NSOptions{Name: 
"service", Tags: nil}) hostMetrics := mb.Namespace(metrics.NSOptions{Name: "host", Tags: nil}) @@ -258,6 +259,7 @@ func TestSpanProcessorErrors(t *testing.T) { err: fmt.Errorf("some-error"), } mb := metricstest.NewFactory(time.Hour) + defer mb.Backend.Stop() serviceMetrics := mb.Namespace(metrics.NSOptions{Name: "service", Tags: nil}) p := NewSpanProcessor(w, nil, @@ -342,6 +344,7 @@ func TestSpanProcessorBusy(t *testing.T) { func TestSpanProcessorWithNilProcess(t *testing.T) { mb := metricstest.NewFactory(time.Hour) + defer mb.Backend.Stop() serviceMetrics := mb.Namespace(metrics.NSOptions{Name: "service", Tags: nil}) w := &fakeSpanWriter{} @@ -440,6 +443,7 @@ func TestSpanProcessorCountSpan(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mb := metricstest.NewFactory(time.Hour) + defer mb.Backend.Stop() m := mb.Namespace(metrics.NSOptions{}) w := &fakeSpanWriter{} @@ -606,6 +610,7 @@ func TestStartDynQueueSizeUpdater(t *testing.T) { } assert.EqualValues(t, 104857, p.queue.Capacity()) + require.NoError(t, p.Close()) } func TestAdditionalProcessors(t *testing.T) { diff --git a/cmd/es-index-cleaner/app/index_filter.go b/cmd/es-index-cleaner/app/index_filter.go index 3f77dc18f54..60c28970b64 100644 --- a/cmd/es-index-cleaner/app/index_filter.go +++ b/cmd/es-index-cleaner/app/index_filter.go @@ -50,9 +50,9 @@ func (i *IndexFilter) filter(indices []client.Index) []client.Index { // archive works only for rollover reg, _ = regexp.Compile(fmt.Sprintf("^%sjaeger-span-archive-\\d{6}", i.IndexPrefix)) case i.Rollover: - reg, _ = regexp.Compile(fmt.Sprintf("^%sjaeger-(span|service|dependencies)-\\d{6}", i.IndexPrefix)) + reg, _ = regexp.Compile(fmt.Sprintf("^%sjaeger-(span|service|dependencies|sampling)-\\d{6}", i.IndexPrefix)) default: - reg, _ = regexp.Compile(fmt.Sprintf("^%sjaeger-(span|service|dependencies)-\\d{4}%s\\d{2}%s\\d{2}", i.IndexPrefix, i.IndexDateSeparator, i.IndexDateSeparator)) + reg, _ = regexp.Compile(fmt.Sprintf("^%sjaeger-(span|service|dependencies|sampling)-\\d{4}%s\\d{2}%s\\d{2}", i.IndexPrefix, i.IndexDateSeparator, i.IndexDateSeparator)) } var filtered []client.Index @@ -62,7 +62,8 @@ func (i *IndexFilter) filter(indices []client.Index) []client.Index { if in.Aliases[i.IndexPrefix+"jaeger-span-write"] || in.Aliases[i.IndexPrefix+"jaeger-service-write"] || in.Aliases[i.IndexPrefix+"jaeger-span-archive-write"] || - in.Aliases[i.IndexPrefix+"jaeger-dependencies-write"] { + in.Aliases[i.IndexPrefix+"jaeger-dependencies-write"] || + in.Aliases[i.IndexPrefix+"jaeger-sampling-write"] { continue } filtered = append(filtered, in) diff --git a/cmd/es-index-cleaner/app/index_filter_test.go b/cmd/es-index-cleaner/app/index_filter_test.go index ec03e2376ec..6a72970c359 100644 --- a/cmd/es-index-cleaner/app/index_filter_test.go +++ b/cmd/es-index-cleaner/app/index_filter_test.go @@ -64,6 +64,16 @@ func testIndexFilter(t *testing.T, prefix string) { CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), Aliases: map[string]bool{}, }, + { + Index: prefix + "jaeger-sampling-2020-08-06", + CreationTime: time.Date(2020, time.August, 0o6, 15, 0, 0, 0, time.UTC), + Aliases: map[string]bool{}, + }, + { + Index: prefix + "jaeger-sampling-2020-08-05", + CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), + Aliases: map[string]bool{}, + }, { Index: prefix + "jaeger-span-archive", CreationTime: time.Date(2020, time.August, 0, 15, 0, 0, 0, time.UTC), @@ -186,6 +196,11 @@ func testIndexFilter(t *testing.T, prefix string) { 
CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), Aliases: map[string]bool{}, }, + { + Index: prefix + "jaeger-sampling-2020-08-05", + CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), + Aliases: map[string]bool{}, + }, }, }, { @@ -228,6 +243,16 @@ func testIndexFilter(t *testing.T, prefix string) { CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), Aliases: map[string]bool{}, }, + { + Index: prefix + "jaeger-sampling-2020-08-06", + CreationTime: time.Date(2020, time.August, 0o6, 15, 0, 0, 0, time.UTC), + Aliases: map[string]bool{}, + }, + { + Index: prefix + "jaeger-sampling-2020-08-05", + CreationTime: time.Date(2020, time.August, 0o5, 15, 0, 0, 0, time.UTC), + Aliases: map[string]bool{}, + }, }, }, { diff --git a/cmd/es-rollover/app/flags.go b/cmd/es-rollover/app/flags.go index 582fe08c81e..56f3acce644 100644 --- a/cmd/es-rollover/app/flags.go +++ b/cmd/es-rollover/app/flags.go @@ -29,6 +29,7 @@ const ( ilmPolicyName = "es.ilm-policy-name" timeout = "timeout" skipDependencies = "skip-dependencies" + adaptiveSampling = "adaptive-sampling" ) // Config holds the global configurations for the es rollover, common to all actions @@ -42,6 +43,7 @@ type Config struct { UseILM bool Timeout int SkipDependencies bool + AdaptiveSampling bool } // AddFlags adds flags @@ -54,6 +56,7 @@ func AddFlags(flags *flag.FlagSet) { flags.String(ilmPolicyName, "jaeger-ilm-policy", "The name of the ILM policy to use if ILM is active") flags.Int(timeout, 120, "Number of seconds to wait for master node response") flags.Bool(skipDependencies, false, "Disable rollover for dependencies index") + flags.Bool(adaptiveSampling, false, "Enable rollover for adaptive sampling index") } // InitFromViper initializes config from viper.Viper. 
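As an aside to the new `--adaptive-sampling` flag added above, here is a minimal sketch (not part of this change) of how the flag is expected to surface on the rollover `Config`; the `config.Viperize` wiring is an assumption drawn from how similar Jaeger flag tests are usually written, not from this diff:

```go
package main

import (
	"fmt"

	"github.com/jaegertracing/jaeger/cmd/es-rollover/app"
	"github.com/jaegertracing/jaeger/pkg/config"
)

func main() {
	// Bind the es-rollover flags to viper and parse a sample command line.
	v, command := config.Viperize(app.AddFlags)
	if err := command.ParseFlags([]string{"--adaptive-sampling=true"}); err != nil {
		panic(err)
	}

	// InitFromViper copies the parsed value into Config.AdaptiveSampling.
	cfg := &app.Config{}
	cfg.InitFromViper(v)
	fmt.Println(cfg.AdaptiveSampling) // true
}
```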
@@ -69,4 +72,5 @@ func (c *Config) InitFromViper(v *viper.Viper) { c.UseILM = v.GetBool(useILM) c.Timeout = v.GetInt(timeout) c.SkipDependencies = v.GetBool(skipDependencies) + c.AdaptiveSampling = v.GetBool(adaptiveSampling) } diff --git a/cmd/es-rollover/app/flags_test.go b/cmd/es-rollover/app/flags_test.go index 3c0e174c033..6bb8f31720e 100644 --- a/cmd/es-rollover/app/flags_test.go +++ b/cmd/es-rollover/app/flags_test.go @@ -42,6 +42,7 @@ func TestBindFlags(t *testing.T) { "--es.use-ilm=true", "--es.ilm-policy-name=jaeger-ilm", "--skip-dependencies=true", + "--adaptive-sampling=true", }) require.NoError(t, err) @@ -53,4 +54,5 @@ func TestBindFlags(t *testing.T) { assert.Equal(t, "qwerty123", c.Password) assert.Equal(t, "jaeger-ilm", c.ILMPolicyName) assert.True(t, c.SkipDependencies) + assert.True(t, c.AdaptiveSampling) } diff --git a/cmd/es-rollover/app/index_options.go b/cmd/es-rollover/app/index_options.go index d34c20187c5..f732ba22101 100644 --- a/cmd/es-rollover/app/index_options.go +++ b/cmd/es-rollover/app/index_options.go @@ -33,7 +33,7 @@ type IndexOption struct { } // RolloverIndices return an array of indices to rollover -func RolloverIndices(archive bool, skipDependencies bool, prefix string) []IndexOption { +func RolloverIndices(archive bool, skipDependencies bool, adaptiveSampling bool, prefix string) []IndexOption { if archive { return []IndexOption{ { @@ -65,6 +65,14 @@ func RolloverIndices(archive bool, skipDependencies bool, prefix string) []Index }) } + if adaptiveSampling { + indexOptions = append(indexOptions, IndexOption{ + prefix: prefix, + Mapping: "jaeger-sampling", + indexType: "jaeger-sampling", + }) + } + return indexOptions } diff --git a/cmd/es-rollover/app/index_options_test.go b/cmd/es-rollover/app/index_options_test.go index 0579062b6ec..f16f6607a9c 100644 --- a/cmd/es-rollover/app/index_options_test.go +++ b/cmd/es-rollover/app/index_options_test.go @@ -34,6 +34,7 @@ func TestRolloverIndices(t *testing.T) { archive bool prefix string skipDependencies bool + adaptiveSampling bool expected []expectedValues }{ { @@ -74,13 +75,6 @@ func TestRolloverIndices(t *testing.T) { writeAliasName: "mytenant-jaeger-span-archive-write", initialRolloverIndex: "mytenant-jaeger-span-archive-000001", }, - { - mapping: "jaeger-dependencies", - templateName: "mytenant-jaeger-dependencies", - readAliasName: "mytenant-jaeger-dependencies-read", - writeAliasName: "mytenant-jaeger-dependencies-write", - initialRolloverIndex: "mytenant-jaeger-dependencies-000001", - }, }, }, { @@ -97,8 +91,9 @@ func TestRolloverIndices(t *testing.T) { }, }, { - name: "with prefix", - prefix: "mytenant", + name: "with prefix", + prefix: "mytenant", + adaptiveSampling: true, expected: []expectedValues{ { mapping: "jaeger-span", @@ -121,12 +116,41 @@ func TestRolloverIndices(t *testing.T) { writeAliasName: "mytenant-jaeger-dependencies-write", initialRolloverIndex: "mytenant-jaeger-dependencies-000001", }, + { + mapping: "jaeger-sampling", + templateName: "mytenant-jaeger-sampling", + readAliasName: "mytenant-jaeger-sampling-read", + writeAliasName: "mytenant-jaeger-sampling-write", + initialRolloverIndex: "mytenant-jaeger-sampling-000001", + }, + }, + }, + { + name: "skip-dependency enable", + prefix: "mytenant", + skipDependencies: true, + expected: []expectedValues{ + { + mapping: "jaeger-span", + templateName: "mytenant-jaeger-span", + readAliasName: "mytenant-jaeger-span-read", + writeAliasName: "mytenant-jaeger-span-write", + initialRolloverIndex: "mytenant-jaeger-span-000001", + }, + { + 
mapping: "jaeger-service", + templateName: "mytenant-jaeger-service", + readAliasName: "mytenant-jaeger-service-read", + writeAliasName: "mytenant-jaeger-service-write", + initialRolloverIndex: "mytenant-jaeger-service-000001", + }, }, }, { - name: "dependency enable", + name: "adaptive sampling enable", prefix: "mytenant", skipDependencies: true, + adaptiveSampling: true, expected: []expectedValues{ { mapping: "jaeger-span", @@ -142,6 +166,13 @@ func TestRolloverIndices(t *testing.T) { writeAliasName: "mytenant-jaeger-service-write", initialRolloverIndex: "mytenant-jaeger-service-000001", }, + { + mapping: "jaeger-sampling", + templateName: "mytenant-jaeger-sampling", + readAliasName: "mytenant-jaeger-sampling-read", + writeAliasName: "mytenant-jaeger-sampling-write", + initialRolloverIndex: "mytenant-jaeger-sampling-000001", + }, }, }, } @@ -151,7 +182,8 @@ func TestRolloverIndices(t *testing.T) { if test.prefix != "" { test.prefix += "-" } - result := RolloverIndices(test.archive, test.skipDependencies, test.prefix) + result := RolloverIndices(test.archive, test.skipDependencies, test.adaptiveSampling, test.prefix) + assert.Equal(t, len(test.expected), len(result)) for i, r := range result { assert.Equal(t, test.expected[i].templateName, r.TemplateName()) assert.Equal(t, test.expected[i].mapping, r.Mapping) diff --git a/cmd/es-rollover/app/init/action.go b/cmd/es-rollover/app/init/action.go index 99134eb61f7..28ee6dea2d5 100644 --- a/cmd/es-rollover/app/init/action.go +++ b/cmd/es-rollover/app/init/action.go @@ -44,6 +44,7 @@ func (c Action) getMapping(version uint, templateName string) (string, error) { PrioritySpanTemplate: int64(c.Config.PrioritySpanTemplate), PriorityServiceTemplate: int64(c.Config.PriorityServiceTemplate), PriorityDependenciesTemplate: int64(c.Config.PriorityDependenciesTemplate), + PrioritySamplingTemplate: int64(c.Config.PrioritySamplingTemplate), Shards: int64(c.Config.Shards), Replicas: int64(c.Config.Replicas), IndexPrefix: c.Config.IndexPrefix, @@ -73,7 +74,7 @@ func (c Action) Do() error { return fmt.Errorf("ILM is supported only for ES version 7+") } } - rolloverIndices := app.RolloverIndices(c.Config.Archive, c.Config.SkipDependencies, c.Config.IndexPrefix) + rolloverIndices := app.RolloverIndices(c.Config.Archive, c.Config.SkipDependencies, c.Config.AdaptiveSampling, c.Config.IndexPrefix) for _, indexName := range rolloverIndices { if err := c.init(version, indexName); err != nil { return err diff --git a/cmd/es-rollover/app/init/flags.go b/cmd/es-rollover/app/init/flags.go index 27fe910e569..6b51e1ebce5 100644 --- a/cmd/es-rollover/app/init/flags.go +++ b/cmd/es-rollover/app/init/flags.go @@ -28,6 +28,7 @@ const ( prioritySpanTemplate = "priority-span-template" priorityServiceTemplate = "priority-service-template" priorityDependenciesTemplate = "priority-dependencies-template" + prioritySamplingTemplate = "priority-sampling-template" ) // Config holds configuration for index cleaner binary. @@ -38,6 +39,7 @@ type Config struct { PrioritySpanTemplate int PriorityServiceTemplate int PriorityDependenciesTemplate int + PrioritySamplingTemplate int } // AddFlags adds flags for TLS to the FlagSet. 
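To make the effect of the new `adaptiveSampling` parameter concrete, a hedged sketch (not part of this change) of calling `RolloverIndices` with adaptive sampling enabled; the expected index set follows from the tests above:

```go
package main

import (
	"fmt"

	"github.com/jaegertracing/jaeger/cmd/es-rollover/app"
)

func main() {
	// archive=false, skipDependencies=false, adaptiveSampling=true, tenant prefix "mytenant-"
	indices := app.RolloverIndices(false, false, true, "mytenant-")
	for _, idx := range indices {
		// With adaptive sampling enabled this lists jaeger-span, jaeger-service,
		// jaeger-dependencies and jaeger-sampling along with their template names.
		fmt.Println(idx.Mapping, idx.TemplateName())
	}
}
```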
@@ -46,7 +48,8 @@ func (c *Config) AddFlags(flags *flag.FlagSet) { flags.Int(replicas, 1, "Number of replicas") flags.Int(prioritySpanTemplate, 0, "Priority of jaeger-span index template (ESv8 only)") flags.Int(priorityServiceTemplate, 0, "Priority of jaeger-service index template (ESv8 only)") - flags.Int(priorityDependenciesTemplate, 0, "Priority of jaeger-dependecies index template (ESv8 only)") + flags.Int(priorityDependenciesTemplate, 0, "Priority of jaeger-dependencies index template (ESv8 only)") + flags.Int(prioritySamplingTemplate, 0, "Priority of jaeger-sampling index template (ESv8 only)") } // InitFromViper initializes config from viper.Viper. @@ -56,4 +59,5 @@ func (c *Config) InitFromViper(v *viper.Viper) { c.PrioritySpanTemplate = v.GetInt(prioritySpanTemplate) c.PriorityServiceTemplate = v.GetInt(priorityServiceTemplate) c.PriorityDependenciesTemplate = v.GetInt(priorityDependenciesTemplate) + c.PrioritySamplingTemplate = v.GetInt(prioritySamplingTemplate) } diff --git a/cmd/es-rollover/app/init/flags_test.go b/cmd/es-rollover/app/init/flags_test.go index 4104f71e5ff..8856ca6f549 100644 --- a/cmd/es-rollover/app/init/flags_test.go +++ b/cmd/es-rollover/app/init/flags_test.go @@ -39,6 +39,7 @@ func TestBindFlags(t *testing.T) { "--priority-span-template=300", "--priority-service-template=301", "--priority-dependencies-template=302", + "--priority-sampling-template=303", }) require.NoError(t, err) @@ -48,4 +49,5 @@ func TestBindFlags(t *testing.T) { assert.Equal(t, 300, c.PrioritySpanTemplate) assert.Equal(t, 301, c.PriorityServiceTemplate) assert.Equal(t, 302, c.PriorityDependenciesTemplate) + assert.Equal(t, 303, c.PrioritySamplingTemplate) } diff --git a/cmd/es-rollover/app/lookback/action.go b/cmd/es-rollover/app/lookback/action.go index f944ff59523..6d2e5dcf4c7 100644 --- a/cmd/es-rollover/app/lookback/action.go +++ b/cmd/es-rollover/app/lookback/action.go @@ -35,7 +35,7 @@ type Action struct { // Do the lookback action func (a *Action) Do() error { - rolloverIndices := app.RolloverIndices(a.Config.Archive, a.Config.SkipDependencies, a.Config.IndexPrefix) + rolloverIndices := app.RolloverIndices(a.Config.Archive, a.Config.SkipDependencies, a.Config.AdaptiveSampling, a.Config.IndexPrefix) for _, indexName := range rolloverIndices { if err := a.lookback(indexName); err != nil { return err diff --git a/cmd/es-rollover/app/rollover/action.go b/cmd/es-rollover/app/rollover/action.go index 2ed612ee6ee..594a488f0f3 100644 --- a/cmd/es-rollover/app/rollover/action.go +++ b/cmd/es-rollover/app/rollover/action.go @@ -30,7 +30,7 @@ type Action struct { // Do the rollover action func (a *Action) Do() error { - rolloverIndices := app.RolloverIndices(a.Config.Archive, a.Config.SkipDependencies, a.Config.IndexPrefix) + rolloverIndices := app.RolloverIndices(a.Config.Archive, a.Config.SkipDependencies, a.Config.AdaptiveSampling, a.Config.IndexPrefix) for _, indexName := range rolloverIndices { if err := a.rollover(indexName); err != nil { return err diff --git a/cmd/esmapping-generator/app/renderer/render.go b/cmd/esmapping-generator/app/renderer/render.go index e9d1ed134eb..74bdfa3cb66 100644 --- a/cmd/esmapping-generator/app/renderer/render.go +++ b/cmd/esmapping-generator/app/renderer/render.go @@ -26,6 +26,7 @@ var supportedMappings = map[string]struct{}{ "jaeger-span": {}, "jaeger-service": {}, "jaeger-dependencies": {}, + "jaeger-sampling": {}, } // GetMappingAsString returns rendered index templates as string diff --git a/cmd/ingester/app/builder/builder.go 
b/cmd/ingester/app/builder/builder.go index f1ca36461ef..d89a2964ed3 100644 --- a/cmd/ingester/app/builder/builder.go +++ b/cmd/ingester/app/builder/builder.go @@ -58,6 +58,8 @@ func CreateConsumer(logger *zap.Logger, metricsFactory metrics.Factory, spanWrit ClientID: options.ClientID, ProtocolVersion: options.ProtocolVersion, AuthenticationConfig: options.AuthenticationConfig, + RackID: options.RackID, + FetchMaxMessageBytes: options.FetchMaxMessageBytes, } saramaConsumer, err := consumerConfig.NewConsumer(logger) if err != nil { diff --git a/cmd/ingester/app/flags.go b/cmd/ingester/app/flags.go index ec84344d99e..ac4cbd79ea8 100644 --- a/cmd/ingester/app/flags.go +++ b/cmd/ingester/app/flags.go @@ -39,6 +39,8 @@ const ( SuffixTopic = ".topic" // SuffixRackID is a suffix for the consumer rack-id flag SuffixRackID = ".rack-id" + // SuffixFetchMaxMessageBytes is a suffix for the consumer fetch-max-message-bytes flag + SuffixFetchMaxMessageBytes = ".fetch-max-message-bytes" // SuffixGroupID is a suffix for the group-id flag SuffixGroupID = ".group-id" // SuffixClientID is a suffix for the client-id flag @@ -67,6 +69,8 @@ const ( DefaultEncoding = kafka.EncodingProto // DefaultDeadlockInterval is the default deadlock interval DefaultDeadlockInterval = time.Duration(0) + // DefaultFetchMaxMessageBytes is the default for kafka.consumer.fetch-max-message-bytes flag + DefaultFetchMaxMessageBytes = 1024 * 1024 // 1MB ) // Options stores the configuration options for the Ingester @@ -117,6 +121,10 @@ func AddFlags(flagSet *flag.FlagSet) { KafkaConsumerConfigPrefix+SuffixRackID, "", "Rack identifier for this client. This can be any string value which indicates where this client is located. It corresponds with the broker config `broker.rack`") + flagSet.Int( + KafkaConsumerConfigPrefix+SuffixFetchMaxMessageBytes, + DefaultFetchMaxMessageBytes, + "The maximum number of message bytes to fetch from the broker in a single request. 
So you must be sure this is at least as large as your largest message.") auth.AddFlags(KafkaConsumerConfigPrefix, flagSet) } @@ -130,6 +138,7 @@ func (o *Options) InitFromViper(v *viper.Viper) { o.ProtocolVersion = v.GetString(KafkaConsumerConfigPrefix + SuffixProtocolVersion) o.Encoding = v.GetString(KafkaConsumerConfigPrefix + SuffixEncoding) o.RackID = v.GetString(KafkaConsumerConfigPrefix + SuffixRackID) + o.FetchMaxMessageBytes = v.GetInt32(KafkaConsumerConfigPrefix + SuffixFetchMaxMessageBytes) o.Parallelism = v.GetInt(ConfigPrefix + SuffixParallelism) o.DeadlockInterval = v.GetDuration(ConfigPrefix + SuffixDeadlockInterval) diff --git a/cmd/ingester/app/flags_test.go b/cmd/ingester/app/flags_test.go index 456a82fa165..bb6cf549014 100644 --- a/cmd/ingester/app/flags_test.go +++ b/cmd/ingester/app/flags_test.go @@ -38,6 +38,7 @@ func TestOptionsWithFlags(t *testing.T) { "--kafka.consumer.group-id=group1", "--kafka.consumer.client-id=client-id1", "--kafka.consumer.rack-id=rack1", + "--kafka.consumer.fetch-max-message-bytes=10485760", "--kafka.consumer.encoding=json", "--kafka.consumer.protocol-version=1.0.0", "--ingester.parallelism=5", @@ -49,6 +50,7 @@ func TestOptionsWithFlags(t *testing.T) { assert.Equal(t, []string{"127.0.0.1:9092", "0.0.0:1234"}, o.Brokers) assert.Equal(t, "group1", o.GroupID) assert.Equal(t, "rack1", o.RackID) + assert.Equal(t, int32(10485760), o.FetchMaxMessageBytes) assert.Equal(t, "client-id1", o.ClientID) assert.Equal(t, "1.0.0", o.ProtocolVersion) assert.Equal(t, 5, o.Parallelism) @@ -108,6 +110,7 @@ func TestFlagDefaults(t *testing.T) { assert.Equal(t, DefaultGroupID, o.GroupID) assert.Equal(t, DefaultClientID, o.ClientID) assert.Equal(t, DefaultParallelism, o.Parallelism) + assert.Equal(t, int32(DefaultFetchMaxMessageBytes), o.FetchMaxMessageBytes) assert.Equal(t, DefaultEncoding, o.Encoding) assert.Equal(t, DefaultDeadlockInterval, o.DeadlockInterval) } diff --git a/cmd/internal/flags/service.go b/cmd/internal/flags/service.go index 291659573b8..622a960a4e2 100644 --- a/cmd/internal/flags/service.go +++ b/cmd/internal/flags/service.go @@ -50,20 +50,16 @@ type Service struct { MetricsFactory metrics.Factory signalsChannel chan os.Signal - - hcStatusChannel chan healthcheck.Status } // NewService creates a new Service. func NewService(adminPort int) *Service { signalsChannel := make(chan os.Signal, 1) - hcStatusChannel := make(chan healthcheck.Status) signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM) return &Service{ - Admin: NewAdminServer(ports.PortToHostPort(adminPort)), - signalsChannel: signalsChannel, - hcStatusChannel: hcStatusChannel, + Admin: NewAdminServer(ports.PortToHostPort(adminPort)), + signalsChannel: signalsChannel, } } @@ -79,11 +75,6 @@ func (s *Service) AddFlags(flagSet *flag.FlagSet) { s.Admin.AddFlags(flagSet) } -// SetHealthCheckStatus sets status of healthcheck -func (s *Service) SetHealthCheckStatus(status healthcheck.Status) { - s.hcStatusChannel <- status -} - // Start bootstraps the service and starts the admin server. 
func (s *Service) Start(v *viper.Viper) error { if err := TryLoadConfigFile(v); err != nil { @@ -143,15 +134,7 @@ func (s *Service) HC() *healthcheck.HealthCheck { func (s *Service) RunAndThen(shutdown func()) { s.HC().Ready() -statusLoop: - for { - select { - case status := <-s.hcStatusChannel: - s.HC().Set(status) - case <-s.signalsChannel: - break statusLoop - } - } + <-s.signalsChannel s.Logger.Info("Shutting down") s.HC().Set(healthcheck.Unavailable) diff --git a/cmd/internal/flags/service_test.go b/cmd/internal/flags/service_test.go index 4194b1eab1f..fbdd13fb3cf 100644 --- a/cmd/internal/flags/service_test.go +++ b/cmd/internal/flags/service_test.go @@ -93,7 +93,7 @@ func TestStartErrors(t *testing.T) { go s.RunAndThen(shutdown) waitForEqual(t, healthcheck.Ready, func() interface{} { return s.HC().Get() }) - s.SetHealthCheckStatus(healthcheck.Unavailable) + s.HC().Set(healthcheck.Unavailable) waitForEqual(t, healthcheck.Unavailable, func() interface{} { return s.HC().Get() }) s.signalsChannel <- os.Interrupt diff --git a/cmd/internal/printconfig/command_test.go b/cmd/internal/printconfig/command_test.go index 155730581d4..b02ca23b2d0 100644 --- a/cmd/internal/printconfig/command_test.go +++ b/cmd/internal/printconfig/command_test.go @@ -27,6 +27,7 @@ import ( "github.com/jaegertracing/jaeger/pkg/config" "github.com/jaegertracing/jaeger/pkg/config/tlscfg" "github.com/jaegertracing/jaeger/pkg/tenancy" + "github.com/jaegertracing/jaeger/pkg/testutils" ) const ( @@ -126,3 +127,7 @@ func TestPrintConfigCommand(t *testing.T) { actual := runPrintConfigCommand(v, t, false) assert.Equal(t, expected, actual) } + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/jaeger/badger_config.yaml b/cmd/jaeger/badger_config.yaml index 950be451250..4643c9cc75a 100644 --- a/cmd/jaeger/badger_config.yaml +++ b/cmd/jaeger/badger_config.yaml @@ -20,12 +20,14 @@ extensions: ephemeral: false maintenance_interval: 5 metrics_update_interval: 10 + span_store_ttl: 72h badger_archive: directory_key: "/tmp/jaeger_archive/" directory_value: "/tmp/jaeger_archive/" ephemeral: false maintenance_interval: 5 metrics_update_interval: 10 + span_store_ttl: 720h receivers: otlp: diff --git a/cmd/jaeger/cassandra_config.yaml b/cmd/jaeger/cassandra_config.yaml new file mode 100644 index 00000000000..86efdcfaf29 --- /dev/null +++ b/cmd/jaeger/cassandra_config.yaml @@ -0,0 +1,41 @@ +service: + extensions: [jaeger_storage, jaeger_query] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [jaeger_storage_exporter] + +extensions: + jaeger_query: + trace_storage: cassandra_main + trace_storage_archive: cassandra_archive + ui_config: ./cmd/jaeger/config-ui.json + + jaeger_storage: + cassandra: + cassandra_main: + servers: 127.0.0.1 + port: 9042 + cassandra_archive: + servers: 127.0.0.1 + port: 9042 +receivers: + otlp: + protocols: + grpc: + http: + + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + +processors: + batch: + +exporters: + jaeger_storage_exporter: + trace_storage: cassandra_main \ No newline at end of file diff --git a/cmd/jaeger/config-elasticsearch.yaml b/cmd/jaeger/config-elasticsearch.yaml new file mode 100644 index 00000000000..7297f7c4009 --- /dev/null +++ b/cmd/jaeger/config-elasticsearch.yaml @@ -0,0 +1,38 @@ +service: + extensions: [jaeger_storage, jaeger_query] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [jaeger_storage_exporter] + +extensions: + jaeger_query: + trace_storage: es_main + 
trace_storage_archive: es_archive + ui_config: ./cmd/jaeger/config-ui.json + + jaeger_storage: + elasticsearch: + es_main: + server_urls: http://localhost:9200 + log_level: "error" + index_prefix: "jaeger-main" + use_aliases: true + es_archive: + server_urls: http://localhost:9200 + log_level: "error" + index_prefix: "jaeger-archive" + use_aliases: true +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + jaeger_storage_exporter: + trace_storage: es_main diff --git a/cmd/jaeger/config-opensearch.yaml b/cmd/jaeger/config-opensearch.yaml new file mode 100644 index 00000000000..7debd780ee6 --- /dev/null +++ b/cmd/jaeger/config-opensearch.yaml @@ -0,0 +1,54 @@ +service: + extensions: [jaeger_storage, jaeger_query] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [jaeger_storage_exporter] + +extensions: + jaeger_query: + trace_storage: os_main + trace_storage_archive: os_archive + ui_config: ./cmd/jaeger/config-ui.json + + jaeger_storage: + opensearch: + os_main: + server_urls: https://localhost:9200 + log_level: "error" + index_prefix: "jaeger-main" + use_aliases: true + username: "admin" + password: "admin" + tls: + enabled: true + skip_host_verify: true + tags_as_fields: + all: true + + os_archive: + server_urls: https://localhost:9200 + log_level: "error" + index_prefix: "jaeger-archive" + use_aliases: true + username: "admin" + password: "admin" + tls: + enabled: true + skip_host_verify: true + tags_as_fields: + all: true + +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + jaeger_storage_exporter: + trace_storage: os_main diff --git a/cmd/jaeger/grpc_config.yaml b/cmd/jaeger/grpc_config.yaml new file mode 100644 index 00000000000..0da873500bd --- /dev/null +++ b/cmd/jaeger/grpc_config.yaml @@ -0,0 +1,31 @@ +service: + extensions: [jaeger_storage, jaeger_query] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [jaeger_storage_exporter] + +extensions: + jaeger_query: + trace_storage: external-storage + ui_config: ./cmd/jaeger/config-ui.json + + jaeger_storage: + grpc: + external-storage: + server: localhost:17271 + connection-timeout: 5s + +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + jaeger_storage_exporter: + trace_storage: external-storage diff --git a/cmd/jaeger/internal/command.go b/cmd/jaeger/internal/command.go index 072a05e8cc1..b53e22e4307 100644 --- a/cmd/jaeger/internal/command.go +++ b/cmd/jaeger/internal/command.go @@ -29,7 +29,7 @@ func Command() *cobra.Command { settings := otelcol.CollectorSettings{ BuildInfo: info, - Factories: components, + Factories: Components, } cmd := otelcol.NewCommand(settings) diff --git a/cmd/jaeger/internal/components.go b/cmd/jaeger/internal/components.go index 2b758a55990..c7b9957dbd8 100644 --- a/cmd/jaeger/internal/components.go +++ b/cmd/jaeger/internal/components.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/connector/forwardconnector" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/loggingexporter" + "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" "go.opentelemetry.io/collector/exporter/otlphttpexporter" "go.opentelemetry.io/collector/extension" @@ -29,6 +29,7 @@ import ( "github.com/jaegertracing/jaeger/cmd/jaeger/internal/exporters/storageexporter" 
"github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerquery" "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/integration/storagecleaner" ) type builders struct { @@ -60,6 +61,7 @@ func (b builders) build() (otelcol.Factories, error) { // add-ons jaegerquery.NewFactory(), jaegerstorage.NewFactory(), + storagecleaner.NewFactory(), // TODO add adaptive sampling ) if err != nil { @@ -80,7 +82,7 @@ func (b builders) build() (otelcol.Factories, error) { factories.Exporters, err = b.exporter( // standard - loggingexporter.NewFactory(), + debugexporter.NewFactory(), otlpexporter.NewFactory(), otlphttpexporter.NewFactory(), // add-ons @@ -116,6 +118,6 @@ func (b builders) build() (otelcol.Factories, error) { return factories, nil } -func components() (otelcol.Factories, error) { +func Components() (otelcol.Factories, error) { return defaultBuilders().build() } diff --git a/cmd/jaeger/internal/components_test.go b/cmd/jaeger/internal/components_test.go index fed360d0bdd..02916f28f04 100644 --- a/cmd/jaeger/internal/components_test.go +++ b/cmd/jaeger/internal/components_test.go @@ -29,7 +29,7 @@ import ( ) func TestComponents(t *testing.T) { - factories, err := components() + factories, err := Components() require.NoError(t, err) @@ -39,7 +39,7 @@ func TestComponents(t *testing.T) { assert.NotNil(t, factories.Processors) assert.NotNil(t, factories.Connectors) - _, jaegerReceiverFactoryExists := factories.Receivers["jaeger"] + _, jaegerReceiverFactoryExists := factories.Receivers[component.MustNewType("jaeger")] assert.True(t, jaegerReceiverFactoryExists) } diff --git a/cmd/jaeger/internal/exporters/storageexporter/factory.go b/cmd/jaeger/internal/exporters/storageexporter/factory.go index 398e9c38efe..2638a949dbe 100644 --- a/cmd/jaeger/internal/exporters/storageexporter/factory.go +++ b/cmd/jaeger/internal/exporters/storageexporter/factory.go @@ -14,7 +14,7 @@ import ( ) // componentType is the name of this extension in configuration. -const componentType = component.Type("jaeger_storage_exporter") +var componentType = component.MustNewType("jaeger_storage_exporter") // ID is the identifier of this extension. 
var ID = component.NewID(componentType) diff --git a/cmd/jaeger/internal/extension/jaegerquery/config.go b/cmd/jaeger/internal/extension/jaegerquery/config.go index cb089f24f80..e5640c2710b 100644 --- a/cmd/jaeger/internal/extension/jaegerquery/config.go +++ b/cmd/jaeger/internal/extension/jaegerquery/config.go @@ -18,10 +18,10 @@ var _ component.ConfigValidator = (*Config)(nil) type Config struct { queryApp.QueryOptionsBase `mapstructure:",squash"` - TraceStoragePrimary string `valid:"required" mapstructure:"trace_storage"` - TraceStorageArchive string `valid:"optional" mapstructure:"trace_storage_archive"` - confighttp.HTTPServerSettings `mapstructure:",squash"` - Tenancy tenancy.Options `mapstructure:"multi_tenancy"` + TraceStoragePrimary string `valid:"required" mapstructure:"trace_storage"` + TraceStorageArchive string `valid:"optional" mapstructure:"trace_storage_archive"` + confighttp.ServerConfig `mapstructure:",squash"` + Tenancy tenancy.Options `mapstructure:"multi_tenancy"` } func (cfg *Config) Validate() error { diff --git a/cmd/jaeger/internal/extension/jaegerquery/factory.go b/cmd/jaeger/internal/extension/jaegerquery/factory.go index bef4b296f05..fcaaf2e6b3e 100644 --- a/cmd/jaeger/internal/extension/jaegerquery/factory.go +++ b/cmd/jaeger/internal/extension/jaegerquery/factory.go @@ -14,7 +14,7 @@ import ( ) // componentType is the name of this extension in configuration. -const componentType = component.Type("jaeger_query") +var componentType = component.MustNewType("jaeger_query") // ID is the identifier of this extension. var ID = component.NewID(componentType) @@ -25,7 +25,7 @@ func NewFactory() extension.Factory { func createDefaultConfig() component.Config { return &Config{ - HTTPServerSettings: confighttp.HTTPServerSettings{ + ServerConfig: confighttp.ServerConfig{ Endpoint: ports.PortToHostPort(ports.QueryHTTP), }, } diff --git a/cmd/jaeger/internal/extension/jaegerquery/server.go b/cmd/jaeger/internal/extension/jaegerquery/server.go index bc83ba06ef6..91c282ca333 100644 --- a/cmd/jaeger/internal/extension/jaegerquery/server.go +++ b/cmd/jaeger/internal/extension/jaegerquery/server.go @@ -14,6 +14,7 @@ import ( "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" queryApp "github.com/jaegertracing/jaeger/cmd/query/app" "github.com/jaegertracing/jaeger/cmd/query/app/querysvc" + "github.com/jaegertracing/jaeger/pkg/healthcheck" "github.com/jaegertracing/jaeger/pkg/jtracer" "github.com/jaegertracing/jaeger/pkg/tenancy" "github.com/jaegertracing/jaeger/plugin/metrics/disabled" @@ -81,6 +82,8 @@ func (s *server) Start(ctx context.Context, host component.Host) error { //nolint s.server, err = queryApp.NewServer( s.logger, + // TODO propagate healthcheck updates up to the collector's runtime + healthcheck.New(), qs, metricsQueryService, s.makeQueryOptions(), diff --git a/cmd/jaeger/internal/extension/jaegerstorage/config.go b/cmd/jaeger/internal/extension/jaegerstorage/config.go index db32f6c79cd..10afd448c9e 100644 --- a/cmd/jaeger/internal/extension/jaegerstorage/config.go +++ b/cmd/jaeger/internal/extension/jaegerstorage/config.go @@ -7,14 +7,21 @@ import ( "fmt" "reflect" + cassandraCfg "github.com/jaegertracing/jaeger/pkg/cassandra/config" + esCfg "github.com/jaegertracing/jaeger/pkg/es/config" memoryCfg "github.com/jaegertracing/jaeger/pkg/memory/config" badgerCfg "github.com/jaegertracing/jaeger/plugin/storage/badger" + grpcCfg "github.com/jaegertracing/jaeger/plugin/storage/grpc/config" ) // Config has the configuration for jaeger-query, 
type Config struct { - Memory map[string]memoryCfg.Configuration `mapstructure:"memory"` - Badger map[string]badgerCfg.NamespaceConfig `mapstructure:"badger"` + Memory map[string]memoryCfg.Configuration `mapstructure:"memory"` + Badger map[string]badgerCfg.NamespaceConfig `mapstructure:"badger"` + GRPC map[string]grpcCfg.Configuration `mapstructure:"grpc"` + Opensearch map[string]esCfg.Configuration `mapstructure:"opensearch"` + Elasticsearch map[string]esCfg.Configuration `mapstructure:"elasticsearch"` + Cassandra map[string]cassandraCfg.Configuration `mapstructure:"cassandra"` // TODO add other storage types here // TODO how will this work with 3rd party storage implementations? // Option: instead of looking for specific name, check interface. @@ -27,6 +34,7 @@ type MemoryStorage struct { func (cfg *Config) Validate() error { emptyCfg := createDefaultConfig().(*Config) + //nolint:govet // The remoteRPCClient field in GRPC.Configuration contains error type if reflect.DeepEqual(*cfg, *emptyCfg) { return fmt.Errorf("%s: no storage type present in config", ID) } else { diff --git a/cmd/jaeger/internal/extension/jaegerstorage/extension.go b/cmd/jaeger/internal/extension/jaegerstorage/extension.go index a1ac028e772..f718ca8cef0 100644 --- a/cmd/jaeger/internal/extension/jaegerstorage/extension.go +++ b/cmd/jaeger/internal/extension/jaegerstorage/extension.go @@ -13,15 +13,26 @@ import ( "go.opentelemetry.io/collector/extension" "go.uber.org/zap" + cassandraCfg "github.com/jaegertracing/jaeger/pkg/cassandra/config" + esCfg "github.com/jaegertracing/jaeger/pkg/es/config" memoryCfg "github.com/jaegertracing/jaeger/pkg/memory/config" "github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/plugin/storage/badger" badgerCfg "github.com/jaegertracing/jaeger/plugin/storage/badger" + "github.com/jaegertracing/jaeger/plugin/storage/cassandra" + "github.com/jaegertracing/jaeger/plugin/storage/es" + "github.com/jaegertracing/jaeger/plugin/storage/grpc" + grpcCfg "github.com/jaegertracing/jaeger/plugin/storage/grpc/config" "github.com/jaegertracing/jaeger/plugin/storage/memory" "github.com/jaegertracing/jaeger/storage" ) -var _ extension.Extension = (*storageExt)(nil) +var _ Extension = (*storageExt)(nil) + +type Extension interface { + extension.Extension + Factory(name string) (storage.Factory, bool) +} type storageExt struct { config *Config @@ -44,7 +55,7 @@ func GetStorageFactory(name string, host component.Host) (storage.Factory, error componentType, ) } - f, ok := comp.(*storageExt).factories[name] + f, ok := comp.(Extension).Factory(name) if !ok { return nil, fmt.Errorf( "cannot find storage '%s' declared with '%s' extension", @@ -107,10 +118,38 @@ func (s *storageExt) Start(ctx context.Context, host component.Host) error { cfg: s.config.Badger, builder: badger.NewFactoryWithConfig, } + grpcStarter := &starter[grpcCfg.Configuration, *grpc.Factory]{ + ext: s, + storageKind: "grpc", + cfg: s.config.GRPC, + builder: grpc.NewFactoryWithConfig, + } + esStarter := &starter[esCfg.Configuration, *es.Factory]{ + ext: s, + storageKind: "elasticsearch", + cfg: s.config.Elasticsearch, + builder: es.NewFactoryWithConfig, + } + osStarter := &starter[esCfg.Configuration, *es.Factory]{ + ext: s, + storageKind: "opensearch", + cfg: s.config.Opensearch, + builder: es.NewFactoryWithConfig, + } + cassandraStarter := &starter[cassandraCfg.Configuration, *cassandra.Factory]{ + ext: s, + storageKind: "cassandra", + cfg: s.config.Cassandra, + builder: cassandra.NewFactoryWithConfig, + } builders := 
[]func(ctx context.Context, host component.Host) error{ memStarter.build, badgerStarter.build, + grpcStarter.build, + esStarter.build, + osStarter.build, + cassandraStarter.build, // TODO add support for other backends } for _, builder := range builders { @@ -133,3 +172,8 @@ func (s *storageExt) Shutdown(ctx context.Context) error { } return errors.Join(errs...) } + +func (s *storageExt) Factory(name string) (storage.Factory, bool) { + f, ok := s.factories[name] + return f, ok +} diff --git a/cmd/jaeger/internal/extension/jaegerstorage/extension_test.go b/cmd/jaeger/internal/extension/jaegerstorage/extension_test.go index 9f65f782895..915d554efeb 100644 --- a/cmd/jaeger/internal/extension/jaegerstorage/extension_test.go +++ b/cmd/jaeger/internal/extension/jaegerstorage/extension_test.go @@ -6,6 +6,8 @@ package jaegerstorage import ( "context" "fmt" + "net/http" + "net/http/httptest" "testing" "github.com/stretchr/testify/require" @@ -16,8 +18,10 @@ import ( nooptrace "go.opentelemetry.io/otel/trace/noop" "go.uber.org/zap" + esCfg "github.com/jaegertracing/jaeger/pkg/es/config" memoryCfg "github.com/jaegertracing/jaeger/pkg/memory/config" "github.com/jaegertracing/jaeger/pkg/metrics" + "github.com/jaegertracing/jaeger/pkg/testutils" badgerCfg "github.com/jaegertracing/jaeger/plugin/storage/badger" "github.com/jaegertracing/jaeger/storage" "github.com/jaegertracing/jaeger/storage/dependencystore" @@ -151,6 +155,48 @@ func TestBadgerStorageExtensionError(t *testing.T) { require.ErrorContains(t, err, "/bad/path") } +func TestESStorageExtension(t *testing.T) { + mockEsServerResponse := []byte(` + { + "Version": { + "Number": "6" + } + } + `) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(mockEsServerResponse) + })) + defer server.Close() + storageExtension := makeStorageExtenion(t, &Config{ + Elasticsearch: map[string]esCfg.Configuration{ + "foo": { + Servers: []string{server.URL}, + LogLevel: "error", + }, + }, + }) + ctx := context.Background() + err := storageExtension.Start(ctx, componenttest.NewNopHost()) + require.NoError(t, err) + require.NoError(t, storageExtension.Shutdown(ctx)) +} + +func TestESStorageExtensionError(t *testing.T) { + defer testutils.VerifyGoLeaksOnce(t) + + ext := makeStorageExtenion(t, &Config{ + Elasticsearch: map[string]esCfg.Configuration{ + "foo": { + Servers: []string{"http://127.0.0.1:65535"}, + LogLevel: "error", + }, + }, + }) + err := ext.Start(context.Background(), componenttest.NewNopHost()) + require.ErrorContains(t, err, "failed to initialize elasticsearch storage") + require.ErrorContains(t, err, "http://127.0.0.1:65535") +} + func noopTelemetrySettings() component.TelemetrySettings { return component.TelemetrySettings{ Logger: zap.L(), diff --git a/cmd/jaeger/internal/extension/jaegerstorage/factory.go b/cmd/jaeger/internal/extension/jaegerstorage/factory.go index 627372915b9..aef19427e12 100644 --- a/cmd/jaeger/internal/extension/jaegerstorage/factory.go +++ b/cmd/jaeger/internal/extension/jaegerstorage/factory.go @@ -11,7 +11,7 @@ import ( ) // componentType is the name of this extension in configuration. -const componentType = component.Type("jaeger_storage") +var componentType = component.MustNewType("jaeger_storage") // ID is the identifier of this extension. 
var ID = component.NewID(componentType) diff --git a/cmd/jaeger/internal/integration/README.md b/cmd/jaeger/internal/integration/README.md new file mode 100644 index 00000000000..b124984b607 --- /dev/null +++ b/cmd/jaeger/internal/integration/README.md @@ -0,0 +1,42 @@ +# Integration + +The Jaeger v2 integration test is an extension of the existing `integration.StorageIntegration` designed to test the Jaeger-v2 OtelCol binary; currently, it only tests the span store. The existing tests at `plugin/storage/integration` (also called "unit mode") test by writing and reading span data directly to the storage API. In contrast, these tests (or "e2e mode") read and write span data through the RPC client to the Jaeger-v2 OtelCol binary. E2E mode tests read from the jaeger_query extension and write to the receiver in OTLP formats. For details, see the [Architecture](#architecture) section below. + +## Architecture + +```mermaid +flowchart LR + Test -->|writeSpan| SpanWriter + SpanWriter --> RPCW[RPC_client] + RPCW --> Receiver + Receiver --> Exporter + Exporter --> B(StorageBackend) + Test -->|readSpan| SpanReader + SpanReader --> RPCR[RPC_client] + RPCR --> jaeger_query + jaeger_query --> B + + subgraph Integration Test Executable + Test + SpanWriter + SpanReader + RPCW + RPCR + end + + subgraph jaeger-v2 + Receiver + Exporter + jaeger_query + end +``` + +## gRPC Integration Test + +To conduct the tests, run the following command: + +``` +STORAGE=grpc \ + SPAN_STORAGE_TYPE=memory \ + make jaeger-v2-storage-integration-test +``` diff --git a/cmd/jaeger/internal/integration/badger_test.go b/cmd/jaeger/internal/integration/badger_test.go new file mode 100644 index 00000000000..4f3526197c4 --- /dev/null +++ b/cmd/jaeger/internal/integration/badger_test.go @@ -0,0 +1,51 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/integration/storagecleaner" + "github.com/jaegertracing/jaeger/plugin/storage/integration" +) + +func cleanUp(t *testing.T) { + Addr := fmt.Sprintf("http://0.0.0.0:%s%s", storagecleaner.Port, storagecleaner.URL) + r, err := http.NewRequest(http.MethodPost, Addr, nil) + require.NoError(t, err) + + client := &http.Client{} + + resp, err := client.Do(r) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode) +} + +func TestBadgerStorage(t *testing.T) { + integration.SkipUnlessEnv(t, "badger") + + s := &E2EStorageIntegration{ + ConfigFile: "../../badger_config.yaml", + StorageIntegration: integration.StorageIntegration{ + SkipBinaryAttrs: true, + SkipArchiveTest: true, + CleanUp: cleanUp, + + // TODO: remove this once badger supports returning spanKind from GetOperations + // Cf https://github.com/jaegertracing/jaeger/issues/1922 + GetOperationsMissingSpanKind: true, + }, + } + s.e2eInitialize(t) + t.Cleanup(func() { + s.e2eCleanUp(t) + }) + s.RunAll(t) +} diff --git a/cmd/jaeger/internal/integration/grpc_test.go b/cmd/jaeger/internal/integration/grpc_test.go new file mode 100644 index 00000000000..22f4bbbd067 --- /dev/null +++ b/cmd/jaeger/internal/integration/grpc_test.go @@ -0,0 +1,46 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package integration
+
+import (
+    "testing"
+
+    "github.com/jaegertracing/jaeger/pkg/testutils"
+    "github.com/jaegertracing/jaeger/plugin/storage/integration"
+)
+
+type GRPCStorageIntegration struct {
+    E2EStorageIntegration
+
+    remoteStorage *integration.RemoteMemoryStorage
+}
+
+func (s *GRPCStorageIntegration) initialize(t *testing.T) {
+    logger, _ := testutils.NewLogger()
+
+    s.remoteStorage = integration.StartNewRemoteMemoryStorage(t, logger)
+
+    s.CleanUp = s.cleanUp
+}
+
+func (s *GRPCStorageIntegration) cleanUp(t *testing.T) {
+    s.remoteStorage.Close(t)
+    s.initialize(t)
+}
+
+func TestGRPCStorage(t *testing.T) {
+    integration.SkipUnlessEnv(t, "grpc")
+
+    s := &GRPCStorageIntegration{}
+    s.ConfigFile = "../../grpc_config.yaml"
+    s.SkipBinaryAttrs = true
+
+    s.initialize(t)
+    s.e2eInitialize(t)
+    t.Cleanup(func() {
+        s.e2eCleanUp(t)
+        s.remoteStorage.Close(t)
+    })
+    s.RunSpanStoreTests(t)
+}
diff --git a/cmd/jaeger/internal/integration/integration.go b/cmd/jaeger/internal/integration/integration.go
new file mode 100644
index 00000000000..e6404c3b9ed
--- /dev/null
+++ b/cmd/jaeger/internal/integration/integration.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2024 The Jaeger Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package integration
+
+import (
+    "io"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+    "gopkg.in/yaml.v3"
+
+    "github.com/jaegertracing/jaeger/pkg/testutils"
+    "github.com/jaegertracing/jaeger/plugin/storage/integration"
+    "github.com/jaegertracing/jaeger/ports"
+)
+
+const otlpPort = 4317
+
+// E2EStorageIntegration holds the components for the e2e mode of the Jaeger-v2
+// storage integration test. The intended usage is as follows:
+// - A specific storage implementation declares its own test functions and
+//   initializes its backend (e.g. starts remote-storage).
+// - Then it calls e2eInitialize() to run the Jaeger-v2 collector
+//   and to create the SpanWriter and SpanReader.
+// - After that, it calls RunSpanStoreTests().
+// - It cleans up with e2eCleanUp() to close the SpanReader and SpanWriter connections.
+// - Finally, it cleans up anything declared in its own test functions
+//   (e.g. closes remote-storage).
+type E2EStorageIntegration struct {
+    integration.StorageIntegration
+    ConfigFile string
+}
+
+// e2eInitialize starts the Jaeger-v2 collector with the provided config file;
+// it also initializes the SpanWriter and SpanReader below.
+// This function should be called before any of the tests start.
+func (s *E2EStorageIntegration) e2eInitialize(t *testing.T) {
+    logger, _ := testutils.NewLogger()
+    configFile := createStorageCleanerConfig(t, s.ConfigFile)
+
+    cmd := exec.Cmd{
+        Path: "./cmd/jaeger/jaeger",
+        Args: []string{"jaeger", "--config", configFile},
+        // Change the working directory to the root of this project
+        // since the jaeger_query extension's ui_config in the config file points to
+        // "./cmd/jaeger/config-ui.json"
+        Dir:    "../../../..",
+        Stdout: os.Stderr,
+        Stderr: os.Stderr,
+    }
+    require.NoError(t, cmd.Start())
+    t.Cleanup(func() {
+        require.NoError(t, cmd.Process.Kill())
+    })
+
+    var err error
+    s.SpanWriter, err = createSpanWriter(logger, otlpPort)
+    require.NoError(t, err)
+    s.SpanReader, err = createSpanReader(ports.QueryGRPC)
+    require.NoError(t, err)
+}
+
+// e2eCleanUp closes the SpanReader and SpanWriter gRPC connections.
+// This function should be called after all the tests are finished.
+func (s *E2EStorageIntegration) e2eCleanUp(t *testing.T) { + require.NoError(t, s.SpanReader.(io.Closer).Close()) + require.NoError(t, s.SpanWriter.(io.Closer).Close()) +} + +func createStorageCleanerConfig(t *testing.T, configFile string) string { + data, err := os.ReadFile(configFile) + require.NoError(t, err) + var config map[string]interface{} + err = yaml.Unmarshal(data, &config) + require.NoError(t, err) + + service, ok := config["service"].(map[string]interface{}) + require.True(t, ok) + service["extensions"] = append(service["extensions"].([]interface{}), "storage_cleaner") + + extensions, ok := config["extensions"].(map[string]interface{}) + require.True(t, ok) + query, ok := extensions["jaeger_query"].(map[string]interface{}) + require.True(t, ok) + trace_storage := query["trace_storage"].(string) + extensions["storage_cleaner"] = map[string]string{"trace_storage": trace_storage} + + newData, err := yaml.Marshal(config) + require.NoError(t, err) + tempFile := filepath.Join(t.TempDir(), "storageCleaner_config.yaml") + err = os.WriteFile(tempFile, newData, 0o600) + require.NoError(t, err) + + return tempFile +} diff --git a/cmd/jaeger/internal/integration/package_test.go b/cmd/jaeger/internal/integration/package_test.go new file mode 100644 index 00000000000..f4fb9b5f972 --- /dev/null +++ b/cmd/jaeger/internal/integration/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/README.md b/cmd/jaeger/internal/integration/receivers/storagereceiver/README.md new file mode 100644 index 00000000000..30931adaf0f --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/README.md @@ -0,0 +1,23 @@ +# Storage Receiver + +`storagereceiver` is a fake receiver that creates an artificial stream of traces by: + +- repeatedly querying one of Jaeger storage backends for all traces (by service). +- tracking new traces / spans and passing them to the next component in the pipeline. + +# Getting Started + +The following settings are required: + +- `trace_storage` (no default): name of a storage backend defined in `jaegerstorage` extension + +The following settings can be optionally configured: + +- `pull_interval` (default = 0s): The delay between each iteration of pulling traces. + +```yaml +receivers: + jaeger_storage_receiver: + trace_storage: external-storage + pull_interval: 0s +``` diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/config.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/config.go new file mode 100644 index 00000000000..e9319b8991d --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/config.go @@ -0,0 +1,20 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "time" + + "github.com/asaskevich/govalidator" +) + +type Config struct { + TraceStorage string `valid:"required" mapstructure:"trace_storage"` + PullInterval time.Duration `mapstructure:"pull_interval"` +} + +func (cfg *Config) Validate() error { + _, err := govalidator.ValidateStruct(cfg) + return err +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/config_test.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/config_test.go new file mode 100644 index 00000000000..a347c276450 --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/config_test.go @@ -0,0 +1,66 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "errors" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + tests := []struct { + id component.ID + expected component.Config + expectedErr error + }{ + { + id: component.NewIDWithName(componentType, ""), + expectedErr: errors.New("non zero value required"), + }, + { + id: component.NewIDWithName(componentType, "defaults"), + expected: &Config{ + TraceStorage: "storage", + PullInterval: 0, + }, + }, + { + id: component.NewIDWithName(componentType, "filled"), + expected: &Config{ + TraceStorage: "storage", + PullInterval: 2 * time.Second, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + if tt.expectedErr != nil { + require.ErrorContains(t, component.ValidateConfig(cfg), tt.expectedErr.Error()) + } else { + require.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg) + } + }) + } +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/factory.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/factory.go new file mode 100644 index 00000000000..9163583b102 --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/factory.go @@ -0,0 +1,36 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" +) + +// componentType is the name of this extension in configuration. +var componentType = component.MustNewType("jaeger_storage_receiver") + +// ID is the identifier of this extension. 
+var ID = component.NewID(componentType) + +func NewFactory() receiver.Factory { + return receiver.NewFactory( + componentType, + createDefaultConfig, + receiver.WithTraces(createTracesReceiver, component.StabilityLevelDevelopment), + ) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createTracesReceiver(ctx context.Context, set receiver.CreateSettings, config component.Config, nextConsumer consumer.Traces) (receiver.Traces, error) { + cfg := config.(*Config) + + return newTracesReceiver(cfg, set, nextConsumer) +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/factory_test.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/factory_test.go new file mode 100644 index 00000000000..a697a1ec4aa --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/factory_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig().(*Config) + require.NotNil(t, cfg, "failed to create default config") + require.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateTracesReceiver(t *testing.T) { + cfg := createDefaultConfig().(*Config) + f := NewFactory() + r, err := f.CreateTracesReceiver(context.Background(), receivertest.NewNopCreateSettings(), cfg, nil) + require.NoError(t, err) + assert.NotNil(t, r) +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/package_test.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/package_test.go new file mode 100644 index 00000000000..4dbecd011d3 --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver.go new file mode 100644 index 00000000000..1e8400660ca --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver.go @@ -0,0 +1,141 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "context" + "fmt" + "time" + + jaeger2otlp "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" + "github.com/jaegertracing/jaeger/model" + "github.com/jaegertracing/jaeger/storage/spanstore" +) + +type storageReceiver struct { + cancelConsumeLoop context.CancelFunc + config *Config + settings receiver.CreateSettings + consumedTraces map[model.TraceID]*consumedTrace + nextConsumer consumer.Traces + spanReader spanstore.Reader +} + +type consumedTrace struct { + spanIDs map[model.SpanID]struct{} +} + +func newTracesReceiver(config *Config, set receiver.CreateSettings, nextConsumer consumer.Traces) (*storageReceiver, error) { + return &storageReceiver{ + config: config, + settings: set, + consumedTraces: make(map[model.TraceID]*consumedTrace), + nextConsumer: nextConsumer, + }, nil +} + +func (r *storageReceiver) Start(ctx context.Context, host component.Host) error { + f, err := jaegerstorage.GetStorageFactory(r.config.TraceStorage, host) + if err != nil { + return fmt.Errorf("cannot find storage factory: %w", err) + } + + if r.spanReader, err = f.CreateSpanReader(); err != nil { + return fmt.Errorf("cannot create span reader: %w", err) + } + + ctx, cancel := context.WithCancel(ctx) + r.cancelConsumeLoop = cancel + + go func() { + if err := r.consumeLoop(ctx); err != nil { + r.settings.ReportStatus(component.NewFatalErrorEvent(err)) + } + }() + + return nil +} + +func (r *storageReceiver) consumeLoop(ctx context.Context) error { + for { + services, err := r.spanReader.GetServices(ctx) + if err != nil { + r.settings.Logger.Error("Failed to get services from consumer", zap.Error(err)) + return err + } + + for _, svc := range services { + if err := r.consumeTraces(ctx, svc); err != nil { + r.settings.Logger.Error("Failed to consume traces from consumer", zap.Error(err)) + } + } + + select { + case <-ctx.Done(): + r.settings.Logger.Info("Consumer stopped") + return nil + default: + time.Sleep(r.config.PullInterval) + } + } +} + +func (r *storageReceiver) consumeTraces(ctx context.Context, serviceName string) error { + endTime := time.Now() + traces, err := r.spanReader.FindTraces(ctx, &spanstore.TraceQueryParameters{ + ServiceName: serviceName, + StartTimeMin: endTime.Add(-1 * time.Hour), + StartTimeMax: endTime, + }) + if err != nil { + return err + } + + for _, trace := range traces { + traceID := trace.Spans[0].TraceID + if _, ok := r.consumedTraces[traceID]; !ok { + r.consumedTraces[traceID] = &consumedTrace{ + spanIDs: make(map[model.SpanID]struct{}), + } + } + r.consumeSpans(ctx, r.consumedTraces[traceID], trace.Spans) + } + + return nil +} + +func (r *storageReceiver) consumeSpans(ctx context.Context, tc *consumedTrace, spans []*model.Span) error { + // Spans are consumed one at a time because we don't know whether all spans + // in a trace have been completely exported + for _, span := range spans { + if _, ok := tc.spanIDs[span.SpanID]; !ok { + tc.spanIDs[span.SpanID] = struct{}{} + td, err := jaeger2otlp.ProtoToTraces([]*model.Batch{ + { + Spans: []*model.Span{span}, + Process: span.Process, + }, + }) + if err != nil { + return err + } + r.nextConsumer.ConsumeTraces(ctx, td) + } + } + + return nil +} + +func (r *storageReceiver) Shutdown(_ 
context.Context) error { + if r.cancelConsumeLoop != nil { + r.cancelConsumeLoop() + } + return nil +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver_test.go b/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver_test.go new file mode 100644 index 00000000000..b71d03d56ea --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/receiver_test.go @@ -0,0 +1,282 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagereceiver + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/storagetest" + jaeger2otlp "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" + "github.com/jaegertracing/jaeger/model" + "github.com/jaegertracing/jaeger/storage" + factoryMocks "github.com/jaegertracing/jaeger/storage/mocks" + spanStoreMocks "github.com/jaegertracing/jaeger/storage/spanstore/mocks" +) + +var _ jaegerstorage.Extension = (*mockStorageExt)(nil) + +type mockStorageExt struct { + name string + factory *factoryMocks.Factory +} + +func (m *mockStorageExt) Start(ctx context.Context, host component.Host) error { + panic("not implemented") +} + +func (m *mockStorageExt) Shutdown(ctx context.Context) error { + panic("not implemented") +} + +func (m *mockStorageExt) Factory(name string) (storage.Factory, bool) { + if m.name == name { + return m.factory, true + } + return nil, false +} + +type receiverTest struct { + storageName string + receiveName string + receiveInterval time.Duration + reportStatus func(*component.StatusEvent) + + reader *spanStoreMocks.Reader + factory *factoryMocks.Factory + host *storagetest.StorageHost + receiver *storageReceiver +} + +func withReceiver( + r *receiverTest, + fn func(r *receiverTest), +) { + reader := new(spanStoreMocks.Reader) + factory := new(factoryMocks.Factory) + host := storagetest.NewStorageHost() + host.WithExtension(jaegerstorage.ID, &mockStorageExt{ + name: r.storageName, + factory: factory, + }) + cfg := createDefaultConfig().(*Config) + cfg.TraceStorage = r.receiveName + cfg.PullInterval = r.receiveInterval + receiver, _ := newTracesReceiver( + cfg, + receivertest.NewNopCreateSettings(), + consumertest.NewNop(), + ) + receiver.settings.ReportStatus = func(se *component.StatusEvent) {} + + r.reader = reader + r.factory = factory + r.host = host + r.receiver = receiver + fn(r) +} + +var ( + services = []string{"example-service-1", "example-service-2"} + spans = []*model.Span{ + { + TraceID: model.NewTraceID(0, 1), + SpanID: model.NewSpanID(1), + Process: &model.Process{ + ServiceName: services[0], + }, + }, + { + TraceID: model.NewTraceID(0, 1), + SpanID: model.NewSpanID(2), + Process: &model.Process{ + ServiceName: services[0], + }, + }, + { + TraceID: model.NewTraceID(0, 2), + SpanID: model.NewSpanID(3), + Process: &model.Process{ + ServiceName: services[1], + }, + }, + { + TraceID: model.NewTraceID(0, 2), + SpanID: model.NewSpanID(4), + Process: &model.Process{ + ServiceName: services[1], + }, + }, + } +) + +func 
TestReceiver_NoStorageError(t *testing.T) { + r := &receiverTest{ + storageName: "", + receiveName: "foo", + } + withReceiver(r, func(r *receiverTest) { + err := r.receiver.Start(context.Background(), r.host) + require.ErrorContains(t, err, "cannot find storage factory") + }) +} + +func TestReceiver_CreateSpanReaderError(t *testing.T) { + r := &receiverTest{ + storageName: "foo", + receiveName: "foo", + } + withReceiver(r, func(r *receiverTest) { + r.factory.On("CreateSpanReader").Return(nil, errors.New("mocked error")) + + err := r.receiver.Start(context.Background(), r.host) + require.ErrorContains(t, err, "cannot create span reader") + }) +} + +func TestReceiver_GetServiceError(t *testing.T) { + r := &receiverTest{ + storageName: "external-storage", + receiveName: "external-storage", + } + withReceiver(r, func(r *receiverTest) { + r.reader.On("GetServices", mock.AnythingOfType("*context.cancelCtx")).Return([]string{}, errors.New("mocked error")) + r.factory.On("CreateSpanReader").Return(r.reader, nil) + r.receiver.spanReader = r.reader + r.reportStatus = func(se *component.StatusEvent) { + require.ErrorContains(t, se.Err(), "mocked error") + } + + require.NoError(t, r.receiver.Start(context.Background(), r.host)) + }) +} + +func TestReceiver_Shutdown(t *testing.T) { + withReceiver(&receiverTest{}, func(r *receiverTest) { + require.NoError(t, r.receiver.Shutdown(context.Background())) + }) +} + +func TestReceiver_Start(t *testing.T) { + r := &receiverTest{ + storageName: "external-storage", + receiveName: "external-storage", + receiveInterval: 50 * time.Millisecond, + } + withReceiver(r, func(r *receiverTest) { + r.reader.On("GetServices", mock.AnythingOfType("*context.cancelCtx")).Return([]string{}, nil) + r.factory.On("CreateSpanReader").Return(r.reader, nil) + + require.NoError(t, r.receiver.Start(context.Background(), r.host)) + // let the consumeLoop to reach the end of iteration and sleep + time.Sleep(100 * time.Millisecond) + require.NoError(t, r.receiver.Shutdown(context.Background())) + }) +} + +func TestReceiver_StartConsume(t *testing.T) { + tests := []struct { + name string + services []string + traces []*model.Trace + tracesErr error + expectedTraces []*model.Trace + }{ + { + name: "empty service", + }, + { + name: "find traces error", + services: []string{"example-service"}, + tracesErr: errors.New("failed to find traces"), + }, + { + name: "consume first trace", + services: []string{services[0]}, + traces: []*model.Trace{ + {Spans: []*model.Span{spans[0]}}, + }, + expectedTraces: []*model.Trace{ + {Spans: []*model.Span{spans[0]}}, + }, + }, + { + name: "consume second trace", + services: services, + traces: []*model.Trace{ + {Spans: []*model.Span{spans[0]}}, + {Spans: []*model.Span{spans[2], spans[3]}}, + }, + expectedTraces: []*model.Trace{ + {Spans: []*model.Span{spans[0]}}, + {Spans: []*model.Span{spans[2]}}, + {Spans: []*model.Span{spans[3]}}, + }, + }, + { + name: "re-consume first trace with new spans", + services: services, + traces: []*model.Trace{ + {Spans: []*model.Span{spans[0], spans[1]}}, + {Spans: []*model.Span{spans[2], spans[3]}}, + }, + expectedTraces: []*model.Trace{ + {Spans: []*model.Span{spans[0]}}, + {Spans: []*model.Span{spans[2]}}, + {Spans: []*model.Span{spans[3]}}, + // span at index 1 is consumed last + {Spans: []*model.Span{spans[1]}}, + }, + }, + } + + withReceiver(&receiverTest{}, func(r *receiverTest) { + sink := &consumertest.TracesSink{} + r.receiver.nextConsumer = sink + + ctx, cancelFunc := context.WithCancel(context.Background()) + 
r.receiver.cancelConsumeLoop = cancelFunc + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + reader := new(spanStoreMocks.Reader) + reader.On("GetServices", mock.AnythingOfType("*context.cancelCtx")).Return(test.services, nil) + reader.On( + "FindTraces", + mock.AnythingOfType("*context.cancelCtx"), + mock.AnythingOfType("*spanstore.TraceQueryParameters"), + ).Return(test.traces, test.tracesErr) + r.receiver.spanReader = reader + + require.NoError(t, r.receiver.Shutdown(ctx)) + require.NoError(t, r.receiver.consumeLoop(ctx)) + + expectedTraces := make([]ptrace.Traces, 0) + for _, trace := range test.expectedTraces { + td, err := jaeger2otlp.ProtoToTraces([]*model.Batch{ + { + Spans: []*model.Span{trace.Spans[0]}, + Process: trace.Spans[0].Process, + }, + }) + require.NoError(t, err) + expectedTraces = append(expectedTraces, td) + } + actualTraces := sink.AllTraces() + assert.Equal(t, expectedTraces, actualTraces) + }) + } + }) +} diff --git a/cmd/jaeger/internal/integration/receivers/storagereceiver/testdata/config.yaml b/cmd/jaeger/internal/integration/receivers/storagereceiver/testdata/config.yaml new file mode 100644 index 00000000000..e590e8f1694 --- /dev/null +++ b/cmd/jaeger/internal/integration/receivers/storagereceiver/testdata/config.yaml @@ -0,0 +1,6 @@ +jaeger_storage_receiver: +jaeger_storage_receiver/defaults: + trace_storage: storage +jaeger_storage_receiver/filled: + trace_storage: storage + pull_interval: 2s diff --git a/cmd/jaeger/internal/integration/span_reader.go b/cmd/jaeger/internal/integration/span_reader.go new file mode 100644 index 00000000000..9d2d61b9658 --- /dev/null +++ b/cmd/jaeger/internal/integration/span_reader.go @@ -0,0 +1,165 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + + "github.com/jaegertracing/jaeger/model" + "github.com/jaegertracing/jaeger/ports" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" + "github.com/jaegertracing/jaeger/storage/spanstore" +) + +var ( + _ spanstore.Reader = (*spanReader)(nil) + _ io.Closer = (*spanReader)(nil) +) + +// SpanReader retrieve span data from Jaeger-v2 query with api_v2.QueryServiceClient. +type spanReader struct { + clientConn *grpc.ClientConn + client api_v2.QueryServiceClient +} + +func createSpanReader(port int) (*spanReader, error) { + opts := []grpc.DialOption{ + grpc.WithBlock(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cc, err := grpc.DialContext(ctx, ports.PortToHostPort(port), opts...) 
+ if err != nil { + return nil, err + } + + return &spanReader{ + clientConn: cc, + client: api_v2.NewQueryServiceClient(cc), + }, nil +} + +func (r *spanReader) Close() error { + return r.clientConn.Close() +} + +func unwrapNotFoundErr(err error) error { + if s, _ := status.FromError(err); s != nil { + if strings.Contains(s.Message(), spanstore.ErrTraceNotFound.Error()) { + return spanstore.ErrTraceNotFound + } + } + return err +} + +func (r *spanReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { + stream, err := r.client.GetTrace(ctx, &api_v2.GetTraceRequest{ + TraceID: traceID, + }) + if err != nil { + return nil, unwrapNotFoundErr(err) + } + + var spans []*model.Span + for received, err := stream.Recv(); !errors.Is(err, io.EOF); received, err = stream.Recv() { + if err != nil { + return nil, unwrapNotFoundErr(err) + } + for i := range received.Spans { + spans = append(spans, &received.Spans[i]) + } + } + + return &model.Trace{ + Spans: spans, + }, nil +} + +func (r *spanReader) GetServices(ctx context.Context) ([]string, error) { + res, err := r.client.GetServices(ctx, &api_v2.GetServicesRequest{}) + if err != nil { + return []string{}, err + } + return res.Services, nil +} + +func (r *spanReader) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { + var operations []spanstore.Operation + res, err := r.client.GetOperations(ctx, &api_v2.GetOperationsRequest{ + Service: query.ServiceName, + SpanKind: query.SpanKind, + }) + if err != nil { + return operations, err + } + + for _, operation := range res.Operations { + operations = append(operations, spanstore.Operation{ + Name: operation.Name, + SpanKind: operation.SpanKind, + }) + } + return operations, nil +} + +func (r *spanReader) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { + var traces []*model.Trace + + if query.NumTraces > math.MaxInt32 { + return traces, fmt.Errorf("NumTraces must not greater than %d", math.MaxInt32) + } + stream, err := r.client.FindTraces(ctx, &api_v2.FindTracesRequest{ + Query: &api_v2.TraceQueryParameters{ + ServiceName: query.ServiceName, + OperationName: query.OperationName, + Tags: query.Tags, + StartTimeMin: query.StartTimeMin, + StartTimeMax: query.StartTimeMax, + DurationMin: query.DurationMin, + DurationMax: query.DurationMax, + SearchDepth: int32(query.NumTraces), + }, + }) + if err != nil { + return traces, err + } + + spanMaps := map[string][]*model.Span{} + for received, err := stream.Recv(); !errors.Is(err, io.EOF); received, err = stream.Recv() { + if err != nil { + return nil, unwrapNotFoundErr(err) + } + for i, span := range received.Spans { + traceID := span.TraceID.String() + if _, ok := spanMaps[traceID]; !ok { + spanMaps[traceID] = make([]*model.Span, 0) + } + spanMaps[traceID] = append(spanMaps[traceID], &received.Spans[i]) + } + } + + for _, spans := range spanMaps { + traces = append(traces, &model.Trace{ + Spans: spans, + }) + } + return traces, nil +} + +func (r *spanReader) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { + panic("not implemented") +} diff --git a/cmd/jaeger/internal/integration/span_writer.go b/cmd/jaeger/internal/integration/span_writer.go new file mode 100644 index 00000000000..a6fff35e402 --- /dev/null +++ b/cmd/jaeger/internal/integration/span_writer.go @@ -0,0 +1,75 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "context" + "fmt" + "io" + + jaeger2otlp "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.uber.org/zap" + + "github.com/jaegertracing/jaeger/model" + "github.com/jaegertracing/jaeger/storage/spanstore" +) + +var ( + _ spanstore.Writer = (*spanWriter)(nil) + _ io.Closer = (*spanWriter)(nil) +) + +// SpanWriter utilizes the OTLP exporter to send span data to the Jaeger-v2 receiver +type spanWriter struct { + exporter exporter.Traces +} + +func createSpanWriter(logger *zap.Logger, port int) (*spanWriter, error) { + factory := otlpexporter.NewFactory() + cfg := factory.CreateDefaultConfig().(*otlpexporter.Config) + cfg.Endpoint = fmt.Sprintf("localhost:%d", port) + cfg.RetryConfig.Enabled = false + cfg.QueueConfig.Enabled = false + cfg.TLSSetting = configtls.ClientConfig{ + Insecure: true, + } + + set := exportertest.NewNopCreateSettings() + set.Logger = logger + + exporter, err := factory.CreateTracesExporter(context.Background(), set, cfg) + if err != nil { + return nil, err + } + if err := exporter.Start(context.Background(), componenttest.NewNopHost()); err != nil { + return nil, err + } + + return &spanWriter{ + exporter: exporter, + }, nil +} + +func (w *spanWriter) Close() error { + return w.exporter.Shutdown(context.Background()) +} + +func (w *spanWriter) WriteSpan(ctx context.Context, span *model.Span) error { + td, err := jaeger2otlp.ProtoToTraces([]*model.Batch{ + { + Spans: []*model.Span{span}, + Process: span.Process, + }, + }) + if err != nil { + return err + } + + return w.exporter.ConsumeTraces(ctx, td) +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/README.md b/cmd/jaeger/internal/integration/storagecleaner/README.md new file mode 100644 index 00000000000..a2011cddb06 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/README.md @@ -0,0 +1,47 @@ +# storage_cleaner + +This module implements an extension that allows purging the backend storage by making an HTTP POST request to it. + +The storage_cleaner extension is intended to be used only in tests, providing a way to clear the storage between test runs. Making a POST request to the exposed endpoint will delete all data in storage. 
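For illustration only, a test could trigger the purge with a plain HTTP request. The sketch below assumes the extension's default port (`9231`) and endpoint (`/purge`) defined further down in `extension.go`; the package and helper name are hypothetical:

```go
package integration

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

// purgeStorage is an illustrative helper (not part of this change): it clears
// the backend between test runs by POSTing to the storage_cleaner endpoint.
// Port 9231 and path /purge are the extension defaults; adjust them if the
// `port` setting is overridden in the collector configuration.
func purgeStorage(t *testing.T) {
	resp, err := http.Post("http://localhost:9231/purge", "", nil)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, http.StatusOK, resp.StatusCode)
}
```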
+ + +```mermaid +flowchart LR + Receiver --> Processor + Processor --> Exporter + JaegerStorageExtension -->|"(1) get storage"| Exporter + Exporter -->|"(2) write trace"| Storage + + E2E_test -->|"(1) POST /purge"| HTTP_endpoint + JaegerStorageExtension -->|"(2) getStorage()"| HTTP_endpoint + HTTP_endpoint -.->|"(3) storage.(*storage.Purger).Purge()"| Storage + + subgraph Jaeger Collector + Receiver + Processor + Exporter + + Storage + StorageCleanerExtension + HTTP_endpoint + subgraph JaegerStorageExtension + Storage + end + subgraph StorageCleanerExtension + HTTP_endpoint + end + end +``` + +# Getting Started + +The following settings are required: + +- `trace_storage` : name of a storage backend defined in `jaegerstorage` extension + +```yaml +extensions: + storage_cleaner: + trace_storage: storage_name +``` + diff --git a/cmd/jaeger/internal/integration/storagecleaner/config.go b/cmd/jaeger/internal/integration/storagecleaner/config.go new file mode 100644 index 00000000000..5b4c9d30812 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/config.go @@ -0,0 +1,18 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "github.com/asaskevich/govalidator" +) + +type Config struct { + TraceStorage string `valid:"required" mapstructure:"trace_storage"` + Port string `mapstructure:"port"` +} + +func (cfg *Config) Validate() error { + _, err := govalidator.ValidateStruct(cfg) + return err +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/config_test.go b/cmd/jaeger/internal/integration/storagecleaner/config_test.go new file mode 100644 index 00000000000..48b49c66e88 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/config_test.go @@ -0,0 +1,23 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStorageExtensionConfig(t *testing.T) { + config := createDefaultConfig().(*Config) + config.TraceStorage = "storage" + err := config.Validate() + require.NoError(t, err) +} + +func TestStorageExtensionConfigError(t *testing.T) { + config := createDefaultConfig().(*Config) + err := config.Validate() + require.ErrorContains(t, err, "non zero value required") +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/extension.go b/cmd/jaeger/internal/integration/storagecleaner/extension.go new file mode 100644 index 00000000000..5ef26eb1867 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/extension.go @@ -0,0 +1,98 @@ +// Copyright (c) 2024 The Jaeger Authors.
+// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/gorilla/mux" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" + "github.com/jaegertracing/jaeger/storage" +) + +var ( + _ extension.Extension = (*storageCleaner)(nil) + _ extension.Dependent = (*storageCleaner)(nil) +) + +const ( + Port = "9231" + URL = "/purge" +) + +type storageCleaner struct { + config *Config + server *http.Server + settings component.TelemetrySettings +} + +func newStorageCleaner(config *Config, telemetrySettings component.TelemetrySettings) *storageCleaner { + return &storageCleaner{ + config: config, + settings: telemetrySettings, + } +} + +func (c *storageCleaner) Start(ctx context.Context, host component.Host) error { + storageFactory, err := jaegerstorage.GetStorageFactory(c.config.TraceStorage, host) + if err != nil { + return fmt.Errorf("cannot find storage factory '%s': %w", c.config.TraceStorage, err) + } + + purgeStorage := func() error { + purger, ok := storageFactory.(storage.Purger) + if !ok { + return fmt.Errorf("storage %s does not implement Purger interface", c.config.TraceStorage) + } + if err := purger.Purge(); err != nil { + return fmt.Errorf("error purging storage: %w", err) + } + return nil + } + + purgeHandler := func(w http.ResponseWriter, r *http.Request) { + if err := purgeStorage(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte("Purge request processed successfully")) + } + + r := mux.NewRouter() + r.HandleFunc(URL, purgeHandler).Methods(http.MethodPost) + c.server = &http.Server{ + Addr: ":" + c.config.Port, + Handler: r, + ReadHeaderTimeout: 3 * time.Second, + } + go func() { + if err := c.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + err = fmt.Errorf("error starting cleaner server: %w", err) + c.settings.ReportStatus(component.NewFatalErrorEvent(err)) + } + }() + + return nil +} + +func (c *storageCleaner) Shutdown(ctx context.Context) error { + if c.server != nil { + if err := c.server.Shutdown(ctx); err != nil { + return fmt.Errorf("error shutting down cleaner server: %w", err) + } + } + return nil +} + +func (c *storageCleaner) Dependencies() []component.ID { + return []component.ID{jaegerstorage.ID} +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/extension_test.go b/cmd/jaeger/internal/integration/storagecleaner/extension_test.go new file mode 100644 index 00000000000..26fb08a47e2 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/extension_test.go @@ -0,0 +1,146 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + "testing" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/storagetest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + + "github.com/jaegertracing/jaeger/cmd/jaeger/internal/extension/jaegerstorage" + "github.com/jaegertracing/jaeger/storage" + factoryMocks "github.com/jaegertracing/jaeger/storage/mocks" +) + +var ( + _ jaegerstorage.Extension = (*mockStorageExt)(nil) + _ storage.Factory = (*PurgerFactory)(nil) +) + +type PurgerFactory struct { + factoryMocks.Factory + err error +} + +func (f *PurgerFactory) Purge() error { + return f.err +} + +type mockStorageExt struct { + name string + factory storage.Factory +} + +func (m *mockStorageExt) Start(ctx context.Context, host component.Host) error { + panic("not implemented") +} + +func (m *mockStorageExt) Shutdown(ctx context.Context) error { + panic("not implemented") +} + +func (m *mockStorageExt) Factory(name string) (storage.Factory, bool) { + if m.name == name { + return m.factory, true + } + return nil, false +} + +func TestStorageCleanerExtension(t *testing.T) { + tests := []struct { + name string + factory storage.Factory + status int + }{ + { + name: "good storage", + factory: &PurgerFactory{}, + status: http.StatusOK, + }, + { + name: "good storage with error", + factory: &PurgerFactory{err: fmt.Errorf("error")}, + status: http.StatusInternalServerError, + }, + { + name: "bad storage", + factory: &factoryMocks.Factory{}, + status: http.StatusInternalServerError, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + config := &Config{ + TraceStorage: "storage", + Port: Port, + } + s := newStorageCleaner(config, component.TelemetrySettings{}) + require.NotEmpty(t, s.Dependencies()) + host := storagetest.NewStorageHost() + host.WithExtension(jaegerstorage.ID, &mockStorageExt{ + name: "storage", + factory: test.factory, + }) + require.NoError(t, s.Start(context.Background(), host)) + defer s.Shutdown(context.Background()) + + addr := fmt.Sprintf("http://0.0.0.0:%s%s", Port, URL) + client := &http.Client{} + require.Eventually(t, func() bool { + r, err := http.NewRequest(http.MethodPost, addr, nil) + require.NoError(t, err) + resp, err := client.Do(r) + require.NoError(t, err) + defer resp.Body.Close() + return test.status == resp.StatusCode + }, 5*time.Second, 100*time.Millisecond) + }) + } +} + +func TestGetStorageFactoryError(t *testing.T) { + config := &Config{} + s := newStorageCleaner(config, component.TelemetrySettings{}) + host := storagetest.NewStorageHost() + host.WithExtension(jaegerstorage.ID, &mockStorageExt{ + name: "storage", + factory: nil, + }) + err := s.Start(context.Background(), host) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot find storage factory") +} + +func TestStorageExtensionStartError(t *testing.T) { + config := &Config{ + TraceStorage: "storage", + Port: "invalid-port", + } + var startStatus atomic.Pointer[component.StatusEvent] + s := newStorageCleaner(config, component.TelemetrySettings{ + ReportStatus: func(status *component.StatusEvent) { + startStatus.Store(status) + }, + }) + host := storagetest.NewStorageHost().WithExtension( + jaegerstorage.ID, + &mockStorageExt{ + name: "storage", + factory: &PurgerFactory{}, + }) + require.NoError(t, s.Start(context.Background(), host)) + assert.Eventually(t, func() 
bool { + return startStatus.Load() != nil + }, 5*time.Second, 100*time.Millisecond) + require.Contains(t, startStatus.Load().Err().Error(), "error starting cleaner server") +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/factory.go b/cmd/jaeger/internal/integration/storagecleaner/factory.go new file mode 100644 index 00000000000..66ab01324ff --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/factory.go @@ -0,0 +1,40 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" +) + +// componentType is the name of this extension in configuration. +var componentType = component.MustNewType("storage_cleaner") + +// ID is the identifier of this extension. +var ID = component.NewID(componentType) + +func NewFactory() extension.Factory { + return extension.NewFactory( + componentType, + createDefaultConfig, + createExtension, + component.StabilityLevelBeta, + ) +} + +func createDefaultConfig() component.Config { + return &Config{ + Port: Port, + } +} + +func createExtension( + _ context.Context, + set extension.CreateSettings, + cfg component.Config, +) (extension.Extension, error) { + return newStorageCleaner(cfg.(*Config), set.TelemetrySettings), nil +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/factory_test.go b/cmd/jaeger/internal/integration/storagecleaner/factory_test.go new file mode 100644 index 00000000000..7b1ec51d8f3 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/factory_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension/extensiontest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := createDefaultConfig().(*Config) + require.NotNil(t, cfg, "failed to create default config") + require.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateExtension(t *testing.T) { + cfg := createDefaultConfig().(*Config) + f := NewFactory() + r, err := f.CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + require.NoError(t, err) + assert.NotNil(t, r) +} diff --git a/cmd/jaeger/internal/integration/storagecleaner/package_test.go b/cmd/jaeger/internal/integration/storagecleaner/package_test.go new file mode 100644 index 00000000000..cec15912582 --- /dev/null +++ b/cmd/jaeger/internal/integration/storagecleaner/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package storagecleaner + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/query/app/apiv3/grpc_handler_test.go b/cmd/query/app/apiv3/grpc_handler_test.go index 2271abb382a..bc59d3b2317 100644 --- a/cmd/query/app/apiv3/grpc_handler_test.go +++ b/cmd/query/app/apiv3/grpc_handler_test.go @@ -77,8 +77,7 @@ func newTestServerClient(t *testing.T) *testServerClient { } tsc.server, tsc.address = newGrpcServer(t, h) - conn, err := grpc.DialContext( - context.Background(), + conn, err := grpc.NewClient( tsc.address.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) diff --git a/cmd/query/app/grpc_handler_test.go b/cmd/query/app/grpc_handler_test.go index d65ee6932fa..72c524f79af 100644 --- a/cmd/query/app/grpc_handler_test.go +++ b/cmd/query/app/grpc_handler_test.go @@ -176,6 +176,7 @@ func newGRPCServer(t *testing.T, q *querysvc.QueryService, mq querysvc.MetricsQu func newGRPCClient(t *testing.T, addr string) *grpcClient { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() + // TODO: Need to replace grpc.DialContext with grpc.NewClient and pass test conn, err := grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) diff --git a/cmd/query/app/http_handler_test.go b/cmd/query/app/http_handler_test.go index f49ac6783cc..e74dd2ce23a 100644 --- a/cmd/query/app/http_handler_test.go +++ b/cmd/query/app/http_handler_test.go @@ -33,7 +33,6 @@ import ( "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" - testHttp "github.com/stretchr/testify/http" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" sdktrace "go.opentelemetry.io/otel/sdk/trace" @@ -197,7 +196,7 @@ func TestLogOnServerError(t *testing.T) { } h := NewAPIHandler(qs, &tenancy.Manager{}, apiHandlerOptions...) e := errors.New("test error") - h.handleError(&testHttp.TestResponseWriter{}, e, http.StatusInternalServerError) + h.handleError(&httptest.ResponseRecorder{}, e, http.StatusInternalServerError) require.Len(t, *l.logs, 1) assert.Equal(t, "HTTP handler, Internal Server Error", (*l.logs)[0].e.Message) assert.Len(t, (*l.logs)[0].f, 1) @@ -987,12 +986,14 @@ func TestSearchTenancyRejectionHTTP(t *testing.T) { // We don't set tenant header resp, err := httpClient.Do(req) require.NoError(t, err) + defer resp.Body.Close() assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) tm := tenancy.NewManager(&tenancyOptions) req.Header.Set(tm.Header, "acme") resp, err = http.DefaultClient.Do(req) require.NoError(t, err) + defer resp.Body.Close() require.Equal(t, http.StatusOK, resp.StatusCode) // Skip unmarshal of response; it is enough that it succeeded } diff --git a/cmd/query/app/package_test.go b/cmd/query/app/package_test.go new file mode 100644 index 00000000000..5946e183ad1 --- /dev/null +++ b/cmd/query/app/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package app + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/query/app/server.go b/cmd/query/app/server.go index 1b8e811ebab..9c34bc26018 100644 --- a/cmd/query/app/server.go +++ b/cmd/query/app/server.go @@ -17,9 +17,11 @@ package app import ( "errors" "fmt" + "io" "net" "net/http" "strings" + "sync" "time" "github.com/gorilla/handlers" @@ -48,23 +50,24 @@ import ( // Server runs HTTP, Mux and a grpc server type Server struct { logger *zap.Logger + healthCheck *healthcheck.HealthCheck querySvc *querysvc.QueryService queryOptions *QueryOptions tracer *jtracer.JTracer // TODO make part of flags.Service - conn net.Listener - grpcConn net.Listener - httpConn net.Listener - cmuxServer cmux.CMux - grpcServer *grpc.Server - httpServer *http.Server - separatePorts bool - unavailableChannel chan healthcheck.Status + conn net.Listener + grpcConn net.Listener + httpConn net.Listener + cmuxServer cmux.CMux + grpcServer *grpc.Server + httpServer *httpServer + separatePorts bool + bgFinished sync.WaitGroup } // NewServer creates and initializes Server -func NewServer(logger *zap.Logger, querySvc *querysvc.QueryService, metricsQuerySvc querysvc.MetricsQueryService, options *QueryOptions, tm *tenancy.Manager, tracer *jtracer.JTracer) (*Server, error) { +func NewServer(logger *zap.Logger, healthCheck *healthcheck.HealthCheck, querySvc *querysvc.QueryService, metricsQuerySvc querysvc.MetricsQueryService, options *QueryOptions, tm *tenancy.Manager, tracer *jtracer.JTracer) (*Server, error) { _, httpPort, err := net.SplitHostPort(options.HTTPHostPort) if err != nil { return nil, fmt.Errorf("invalid HTTP server host:port: %w", err) @@ -89,22 +92,17 @@ func NewServer(logger *zap.Logger, querySvc *querysvc.QueryService, metricsQuery } return &Server{ - logger: logger, - querySvc: querySvc, - queryOptions: options, - tracer: tracer, - grpcServer: grpcServer, - httpServer: httpServer, - separatePorts: grpcPort != httpPort, - unavailableChannel: make(chan healthcheck.Status), + logger: logger, + healthCheck: healthCheck, + querySvc: querySvc, + queryOptions: options, + tracer: tracer, + grpcServer: grpcServer, + httpServer: httpServer, + separatePorts: grpcPort != httpPort, }, nil } -// HealthCheckStatus returns health check status channel a client can subscribe to -func (s Server) HealthCheckStatus() chan healthcheck.Status { - return s.unavailableChannel -} - func createGRPCServer(querySvc *querysvc.QueryService, metricsQuerySvc querysvc.MetricsQueryService, options *QueryOptions, tm *tenancy.Manager, logger *zap.Logger, tracer *jtracer.JTracer) (*grpc.Server, error) { var grpcOpts []grpc.ServerOption @@ -146,6 +144,13 @@ func createGRPCServer(querySvc *querysvc.QueryService, metricsQuerySvc querysvc. 
return server, nil } +type httpServer struct { + *http.Server + staticHandlerCloser io.Closer +} + +var _ io.Closer = (*httpServer)(nil) + func createHTTPServer( querySvc *querysvc.QueryService, metricsQuerySvc querysvc.MetricsQueryService, @@ -153,7 +158,7 @@ func createHTTPServer( tm *tenancy.Manager, tracer *jtracer.JTracer, logger *zap.Logger, -) (*http.Server, error) { +) (*httpServer, error) { apiHandlerOptions := []HandlerOption{ HandlerOptions.Logger(logger), HandlerOptions.Tracer(tracer), @@ -177,7 +182,6 @@ func createHTTPServer( }).RegisterRoutes(r) apiHandler.RegisterRoutes(r) - RegisterStaticHandler(r, logger, queryOpts, querySvc.GetCapabilities()) var handler http.Handler = r handler = additionalHeadersHandler(handler, queryOpts.AdditionalHeaders) if queryOpts.BearerTokenPropagation { @@ -187,10 +191,12 @@ func createHTTPServer( recoveryHandler := recoveryhandler.NewRecoveryHandler(logger, true) errorLog, _ := zap.NewStdLogAt(logger, zapcore.ErrorLevel) - server := &http.Server{ - Handler: recoveryHandler(handler), - ErrorLog: errorLog, - ReadHeaderTimeout: 2 * time.Second, + server := &httpServer{ + Server: &http.Server{ + Handler: recoveryHandler(handler), + ErrorLog: errorLog, + ReadHeaderTimeout: 2 * time.Second, + }, } if queryOpts.TLSHTTP.Enabled { @@ -201,9 +207,19 @@ func createHTTPServer( server.TLSConfig = tlsCfg } + + server.staticHandlerCloser = RegisterStaticHandler(r, logger, queryOpts, querySvc.GetCapabilities()) + return server, nil } +func (hS httpServer) Close() error { + var errs []error + errs = append(errs, hS.Server.Close()) + errs = append(errs, hS.staticHandlerCloser.Close()) + return errors.Join(errs...) +} + // initListener initialises listeners of the server func (s *Server) initListener() (cmux.CMux, error) { if s.separatePorts { // use separate ports and listeners each for gRPC and HTTP requests @@ -280,6 +296,7 @@ func (s *Server) Start() error { grpcPort = port } + s.bgFinished.Add(1) go func() { s.logger.Info("Starting HTTP server", zap.Int("port", httpPort), zap.String("addr", s.queryOptions.HTTPHostPort)) var err error @@ -292,21 +309,25 @@ func (s *Server) Start() error { s.logger.Error("Could not start HTTP server", zap.Error(err)) } - s.unavailableChannel <- healthcheck.Unavailable + s.healthCheck.Set(healthcheck.Unavailable) + s.bgFinished.Done() }() // Start GRPC server concurrently + s.bgFinished.Add(1) go func() { s.logger.Info("Starting GRPC server", zap.Int("port", grpcPort), zap.String("addr", s.queryOptions.GRPCHostPort)) if err := s.grpcServer.Serve(s.grpcConn); err != nil { s.logger.Error("Could not start GRPC server", zap.Error(err)) } - s.unavailableChannel <- healthcheck.Unavailable + s.healthCheck.Set(healthcheck.Unavailable) + s.bgFinished.Done() }() // Start cmux server concurrently. 
if !s.separatePorts { + s.bgFinished.Add(1) go func() { s.logger.Info("Starting CMUX server", zap.Int("port", tcpPort), zap.String("addr", s.queryOptions.HTTPHostPort)) @@ -315,7 +336,8 @@ func (s *Server) Start() error { if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { s.logger.Error("Could not start multiplexed server", zap.Error(err)) } - s.unavailableChannel <- healthcheck.Unavailable + s.healthCheck.Set(healthcheck.Unavailable) + s.bgFinished.Done() }() } @@ -329,12 +351,9 @@ func (s *Server) Close() error { errs = append(errs, s.queryOptions.TLSHTTP.Close()) s.grpcServer.Stop() errs = append(errs, s.httpServer.Close()) - if s.separatePorts { - errs = append(errs, s.httpConn.Close()) - errs = append(errs, s.grpcConn.Close()) - } else { + if !s.separatePorts { s.cmuxServer.Close() - errs = append(errs, s.conn.Close()) } + s.bgFinished.Wait() return errors.Join(errs...) } diff --git a/cmd/query/app/server_test.go b/cmd/query/app/server_test.go index 43e0bcc53a4..4a08c7b4f29 100644 --- a/cmd/query/app/server_test.go +++ b/cmd/query/app/server_test.go @@ -20,7 +20,6 @@ import ( "fmt" "net" "net/http" - "sync" "testing" "time" @@ -67,7 +66,7 @@ func TestCreateTLSServerSinglePortError(t *testing.T) { ClientCAPath: testCertKeyLocation + "/example-CA-cert.pem", } - _, err := NewServer(zap.NewNop(), &querysvc.QueryService{}, nil, + _, err := NewServer(zap.NewNop(), healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{HTTPHostPort: ":8080", GRPCHostPort: ":8080", TLSGRPC: tlsCfg, TLSHTTP: tlsCfg}, tenancy.NewManager(&tenancy.Options{}), jtracer.NoOp()) require.Error(t, err) @@ -81,7 +80,7 @@ func TestCreateTLSGrpcServerError(t *testing.T) { ClientCAPath: "invalid/path", } - _, err := NewServer(zap.NewNop(), &querysvc.QueryService{}, nil, + _, err := NewServer(zap.NewNop(), healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{HTTPHostPort: ":8080", GRPCHostPort: ":8081", TLSGRPC: tlsCfg}, tenancy.NewManager(&tenancy.Options{}), jtracer.NoOp()) require.Error(t, err) @@ -95,7 +94,7 @@ func TestCreateTLSHttpServerError(t *testing.T) { ClientCAPath: "invalid/path", } - _, err := NewServer(zap.NewNop(), &querysvc.QueryService{}, nil, + _, err := NewServer(zap.NewNop(), healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{HTTPHostPort: ":8080", GRPCHostPort: ":8081", TLSHTTP: tlsCfg}, tenancy.NewManager(&tenancy.Options{}), jtracer.NoOp()) require.Error(t, err) @@ -340,27 +339,12 @@ func TestServerHTTPTLS(t *testing.T) { spanReader.On("GetServices", mock.AnythingOfType("*context.valueCtx")).Return(expectedServices, nil) querySvc := querysvc.NewQueryService(spanReader, dependencyReader, querysvc.QueryServiceOptions{}) - server, err := NewServer(flagsSvc.Logger, querySvc, nil, - serverOptions, tenancy.NewManager(&tenancy.Options{}), + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, + nil, serverOptions, tenancy.NewManager(&tenancy.Options{}), jtracer.NoOp()) require.NoError(t, err) require.NoError(t, server.Start()) - var wg sync.WaitGroup - wg.Add(1) - once := sync.Once{} - - go func() { - for s := range server.HealthCheckStatus() { - flagsSvc.HC().Set(s) - if s == healthcheck.Unavailable { - once.Do(func() { - wg.Done() - }) - } - } - }() - var clientError error var clientClose func() error var clientTLSCfg *tls.Config @@ -370,6 +354,8 @@ func TestServerHTTPTLS(t *testing.T) { var err0 error clientTLSCfg, err0 = test.clientTLS.Config(zap.NewNop()) + defer test.clientTLS.Close() + require.NoError(t, err0) dialer := 
&net.Dialer{Timeout: 2 * time.Second} conn, err1 := tls.DialWithDialer(dialer, "tcp", "localhost:"+fmt.Sprintf("%d", ports.QueryHTTP), clientTLSCfg) @@ -423,7 +409,6 @@ func TestServerHTTPTLS(t *testing.T) { } } server.Close() - wg.Wait() assert.Equal(t, healthcheck.Unavailable, flagsSvc.HC().Get()) }) } @@ -436,8 +421,10 @@ func newGRPCClientWithTLS(t *testing.T, addr string, creds credentials.Transport var err error if creds != nil { + // TODO: Need to replace grpc.DialContext with grpc.NewClient and pass test conn, err = grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(creds)) } else { + // TODO: Need to replace grpc.DialContext with grpc.NewClient and pass test conn, err = grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials())) } @@ -502,33 +489,19 @@ func TestServerGRPCTLS(t *testing.T) { spanReader.On("GetServices", mock.AnythingOfType("*context.valueCtx")).Return(expectedServices, nil) querySvc := querysvc.NewQueryService(spanReader, dependencyReader, querysvc.QueryServiceOptions{}) - server, err := NewServer(flagsSvc.Logger, querySvc, nil, - serverOptions, tenancy.NewManager(&tenancy.Options{}), + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, + nil, serverOptions, tenancy.NewManager(&tenancy.Options{}), jtracer.NoOp()) require.NoError(t, err) require.NoError(t, server.Start()) - var wg sync.WaitGroup - wg.Add(1) - once := sync.Once{} - - go func() { - for s := range server.HealthCheckStatus() { - flagsSvc.HC().Set(s) - if s == healthcheck.Unavailable { - once.Do(func() { - wg.Done() - }) - } - } - }() - var clientError error var client *grpcClient if serverOptions.TLSGRPC.Enabled { clientTLSCfg, err0 := test.clientTLS.Config(zap.NewNop()) require.NoError(t, err0) + defer test.clientTLS.Close() creds := credentials.NewTLS(clientTLSCfg) client = newGRPCClientWithTLS(t, ports.PortToHostPort(ports.QueryGRPC), creds) @@ -549,14 +522,13 @@ func TestServerGRPCTLS(t *testing.T) { } require.NoError(t, client.conn.Close()) server.Close() - wg.Wait() assert.Equal(t, healthcheck.Unavailable, flagsSvc.HC().Get()) }) } } func TestServerBadHostPort(t *testing.T) { - _, err := NewServer(zap.NewNop(), &querysvc.QueryService{}, nil, + _, err := NewServer(zap.NewNop(), healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{ HTTPHostPort: "8080", GRPCHostPort: "127.0.0.1:8081", @@ -568,7 +540,7 @@ func TestServerBadHostPort(t *testing.T) { jtracer.NoOp()) require.Error(t, err) - _, err = NewServer(zap.NewNop(), &querysvc.QueryService{}, nil, + _, err = NewServer(zap.NewNop(), healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{ HTTPHostPort: "127.0.0.1:8081", GRPCHostPort: "9123", @@ -600,6 +572,7 @@ func TestServerInUseHostPort(t *testing.T) { t.Run(tc.name, func(t *testing.T) { server, err := NewServer( zap.NewNop(), + healthcheck.New(), &querysvc.QueryService{}, nil, &QueryOptions{ @@ -617,12 +590,7 @@ func TestServerInUseHostPort(t *testing.T) { err = server.Start() require.Error(t, err) - if server.grpcConn != nil { - server.grpcConn.Close() - } - if server.httpConn != nil { - server.httpConn.Close() - } + server.Close() }) } } @@ -637,7 +605,7 @@ func TestServerSinglePort(t *testing.T) { spanReader.On("GetServices", mock.AnythingOfType("*context.valueCtx")).Return(expectedServices, nil) querySvc := querysvc.NewQueryService(spanReader, dependencyReader, querysvc.QueryServiceOptions{}) - server, err := NewServer(flagsSvc.Logger, querySvc, nil, + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, nil, 
&QueryOptions{ GRPCHostPort: hostPort, HTTPHostPort: hostPort, @@ -650,23 +618,6 @@ func TestServerSinglePort(t *testing.T) { require.NoError(t, err) require.NoError(t, server.Start()) - var wg sync.WaitGroup - wg.Add(1) - once := sync.Once{} - - go func() { - for s := range server.HealthCheckStatus() { - flagsSvc.HC().Set(s) - if s == healthcheck.Unavailable { - once.Do(func() { - wg.Done() - }) - } - - } - wg.Done() - }() - client := newGRPCClient(t, hostPort) defer client.conn.Close() @@ -678,7 +629,6 @@ func TestServerSinglePort(t *testing.T) { assert.Equal(t, expectedServices, res.Services) server.Close() - wg.Wait() assert.Equal(t, healthcheck.Unavailable, flagsSvc.HC().Get()) } @@ -694,15 +644,11 @@ func TestServerGracefulExit(t *testing.T) { querySvc := &querysvc.QueryService{} tracer := jtracer.NoOp() - server, err := NewServer(flagsSvc.Logger, querySvc, nil, &QueryOptions{GRPCHostPort: hostPort, HTTPHostPort: hostPort}, + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, nil, + &QueryOptions{GRPCHostPort: hostPort, HTTPHostPort: hostPort}, tenancy.NewManager(&tenancy.Options{}), tracer) require.NoError(t, err) require.NoError(t, server.Start()) - go func() { - for s := range server.HealthCheckStatus() { - flagsSvc.HC().Set(s) - } - }() // Wait for servers to come up before we can call .Close() // TODO Find a way to wait only as long as necessary. Unconditional sleep slows down the tests. @@ -722,7 +668,7 @@ func TestServerHandlesPortZero(t *testing.T) { querySvc := &querysvc.QueryService{} tracer := jtracer.NoOp() - server, err := NewServer(flagsSvc.Logger, querySvc, nil, + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, nil, &QueryOptions{GRPCHostPort: ":0", HTTPHostPort: ":0"}, tenancy.NewManager(&tenancy.Options{}), tracer) @@ -783,9 +729,8 @@ func TestServerHTTPTenancy(t *testing.T) { dependencyReader := &depsmocks.Reader{} querySvc := querysvc.NewQueryService(spanReader, dependencyReader, querysvc.QueryServiceOptions{}) - server, err := NewServer(zap.NewNop(), querySvc, nil, - serverOptions, tenancyMgr, - jtracer.NoOp()) + server, err := NewServer(zap.NewNop(), healthcheck.New(), querySvc, + nil, serverOptions, tenancyMgr, jtracer.NoOp()) require.NoError(t, err) require.NoError(t, server.Start()) diff --git a/cmd/query/app/static_handler.go b/cmd/query/app/static_handler.go index 3b6d2315942..5ed67963f91 100644 --- a/cmd/query/app/static_handler.go +++ b/cmd/query/app/static_handler.go @@ -46,7 +46,7 @@ var ( ) // RegisterStaticHandler adds handler for static assets to the router. 
-func RegisterStaticHandler(r *mux.Router, logger *zap.Logger, qOpts *QueryOptions, qCapabilities querysvc.StorageCapabilities) { +func RegisterStaticHandler(r *mux.Router, logger *zap.Logger, qOpts *QueryOptions, qCapabilities querysvc.StorageCapabilities) io.Closer { staticHandler, err := NewStaticAssetsHandler(qOpts.StaticAssets.Path, StaticAssetsHandlerOptions{ BasePath: qOpts.BasePath, UIBasePath: qOpts.UIBasePath, // Added UIBasePath here @@ -60,6 +60,8 @@ func RegisterStaticHandler(r *mux.Router, logger *zap.Logger, qOpts *QueryOption } staticHandler.RegisterRoutes(r) + + return staticHandler } // StaticAssetsHandler handles static assets @@ -241,3 +243,7 @@ func (sH *StaticAssetsHandler) notFound(w http.ResponseWriter, r *http.Request) w.Header().Set("Content-Type", "text/html; charset=utf-8") w.Write(sH.indexHTML.Load().([]byte)) } + +func (sH *StaticAssetsHandler) Close() error { + return sH.watcher.Close() +} diff --git a/cmd/query/app/static_handler_test.go b/cmd/query/app/static_handler_test.go index 30a8aeaaca4..461f7a44f3b 100644 --- a/cmd/query/app/static_handler_test.go +++ b/cmd/query/app/static_handler_test.go @@ -49,7 +49,7 @@ func TestNotExistingUiConfig(t *testing.T) { func TestRegisterStaticHandlerPanic(t *testing.T) { logger, buf := testutils.NewLogger() assert.Panics(t, func() { - RegisterStaticHandler( + closer := RegisterStaticHandler( mux.NewRouter(), logger, &QueryOptions{ @@ -61,6 +61,7 @@ func TestRegisterStaticHandlerPanic(t *testing.T) { }, querysvc.StorageCapabilities{ArchiveStorage: false}, ) + defer closer.Close() }) assert.Contains(t, buf.String(), "Could not create static assets handler") assert.Contains(t, buf.String(), "no such file or directory") @@ -118,7 +119,7 @@ func TestRegisterStaticHandler(t *testing.T) { if testCase.subroute { r = r.PathPrefix(testCase.basePath).Subrouter() } - RegisterStaticHandler(r, logger, &QueryOptions{ + closer := RegisterStaticHandler(r, logger, &QueryOptions{ QueryOptionsBase: QueryOptionsBase{ StaticAssets: QueryOptionsStaticAssets{ Path: "fixture", @@ -130,6 +131,7 @@ func TestRegisterStaticHandler(t *testing.T) { }, querysvc.StorageCapabilities{ArchiveStorage: testCase.archiveStorage}, ) + defer closer.Close() server := httptest.NewServer(r) defer server.Close() @@ -199,6 +201,7 @@ func TestHotReloadUIConfig(t *testing.T) { Logger: logger, }) require.NoError(t, err) + defer h.Close() c := string(h.indexHTML.Load().([]byte)) assert.Contains(t, c, "About Jaeger") diff --git a/cmd/query/app/token_propagation_test.go b/cmd/query/app/token_propagation_test.go index 9fd3aca71fa..1ae5d5c0ef0 100644 --- a/cmd/query/app/token_propagation_test.go +++ b/cmd/query/app/token_propagation_test.go @@ -85,12 +85,13 @@ func runQueryService(t *testing.T, esURL string) *Server { // set AllowTokenFromContext manually because we don't register the respective CLI flag from query svc f.Options.Primary.AllowTokenFromContext = true require.NoError(t, f.Initialize(metrics.NullFactory, flagsSvc.Logger)) + defer f.Close() spanReader, err := f.CreateSpanReader() require.NoError(t, err) querySvc := querysvc.NewQueryService(spanReader, nil, querysvc.QueryServiceOptions{}) - server, err := NewServer(flagsSvc.Logger, querySvc, nil, + server, err := NewServer(flagsSvc.Logger, flagsSvc.HC(), querySvc, nil, &QueryOptions{ GRPCHostPort: ":0", HTTPHostPort: ":0", @@ -137,6 +138,7 @@ func TestBearerTokenPropagation(t *testing.T) { resp, err := client.Do(req) require.NoError(t, err) require.NotNil(t, resp) + defer resp.Body.Close() assert.Equal(t, 
http.StatusOK, resp.StatusCode) }) diff --git a/cmd/query/main.go b/cmd/query/main.go index ccc9ab81933..23310ee64b1 100644 --- a/cmd/query/main.go +++ b/cmd/query/main.go @@ -113,17 +113,11 @@ func main() { dependencyReader, *queryServiceOptions) tm := tenancy.NewManager(&queryOpts.Tenancy) - server, err := app.NewServer(svc.Logger, queryService, metricsQueryService, queryOpts, tm, jt) + server, err := app.NewServer(svc.Logger, svc.HC(), queryService, metricsQueryService, queryOpts, tm, jt) if err != nil { logger.Fatal("Failed to create server", zap.Error(err)) } - go func() { - for s := range server.HealthCheckStatus() { - svc.SetHealthCheckStatus(s) - } - }() - if err := server.Start(); err != nil { logger.Fatal("Could not start servers", zap.Error(err)) } diff --git a/cmd/remote-storage/app/package_test.go b/cmd/remote-storage/app/package_test.go new file mode 100644 index 00000000000..5946e183ad1 --- /dev/null +++ b/cmd/remote-storage/app/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package app + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/cmd/remote-storage/app/server.go b/cmd/remote-storage/app/server.go index b7ccbf596da..ecc29f0ff9b 100644 --- a/cmd/remote-storage/app/server.go +++ b/cmd/remote-storage/app/server.go @@ -17,6 +17,7 @@ package app import ( "fmt" "net" + "sync" "go.uber.org/zap" "google.golang.org/grpc" @@ -34,16 +35,17 @@ import ( // Server runs a gRPC server type Server struct { - logger *zap.Logger - opts *Options + logger *zap.Logger + healthcheck *healthcheck.HealthCheck + opts *Options - grpcConn net.Listener - grpcServer *grpc.Server - unavailableChannel chan healthcheck.Status // used to signal to admin server that gRPC server is unavailable + grpcConn net.Listener + grpcServer *grpc.Server + wg sync.WaitGroup } // NewServer creates and initializes Server. 
-func NewServer(options *Options, storageFactory storage.Factory, tm *tenancy.Manager, logger *zap.Logger) (*Server, error) { +func NewServer(options *Options, storageFactory storage.Factory, tm *tenancy.Manager, logger *zap.Logger, healthcheck *healthcheck.HealthCheck) (*Server, error) { handler, err := createGRPCHandler(storageFactory, logger) if err != nil { return nil, err @@ -55,10 +57,10 @@ func NewServer(options *Options, storageFactory storage.Factory, tm *tenancy.Man } return &Server{ - logger: logger, - opts: options, - grpcServer: grpcServer, - unavailableChannel: make(chan healthcheck.Status), + logger: logger, + healthcheck: healthcheck, + opts: options, + grpcServer: grpcServer, }, nil } @@ -94,11 +96,6 @@ func createGRPCHandler(f storage.Factory, logger *zap.Logger) (*shared.GRPCHandl return handler, nil } -// HealthCheckStatus returns health check status channel a client can subscribe to -func (s Server) HealthCheckStatus() chan healthcheck.Status { - return s.unavailableChannel -} - func createGRPCServer(opts *Options, tm *tenancy.Manager, handler *shared.GRPCHandler, logger *zap.Logger) (*grpc.Server, error) { var grpcOpts []grpc.ServerOption @@ -132,11 +129,13 @@ func (s *Server) Start() error { } s.logger.Info("Starting GRPC server", zap.Stringer("addr", listener.Addr())) s.grpcConn = listener + s.wg.Add(1) go func() { + defer s.wg.Done() if err := s.grpcServer.Serve(s.grpcConn); err != nil { s.logger.Error("GRPC server exited", zap.Error(err)) } - s.unavailableChannel <- healthcheck.Unavailable + s.healthcheck.Set(healthcheck.Unavailable) }() return nil @@ -147,5 +146,6 @@ func (s *Server) Close() error { s.grpcServer.Stop() s.grpcConn.Close() s.opts.TLSGRPC.Close() + s.wg.Wait() return nil } diff --git a/cmd/remote-storage/app/server_test.go b/cmd/remote-storage/app/server_test.go index 2e9ba793d48..ae8c65adfb3 100644 --- a/cmd/remote-storage/app/server_test.go +++ b/cmd/remote-storage/app/server_test.go @@ -17,7 +17,6 @@ package app import ( "context" "errors" - "sync" "testing" "time" @@ -59,6 +58,7 @@ func TestNewServer_CreateStorageErrors(t *testing.T) { factory, tenancy.NewManager(&tenancy.Options{}), zap.NewNop(), + healthcheck.New(), ) } _, err := f() @@ -80,7 +80,6 @@ func TestNewServer_CreateStorageErrors(t *testing.T) { validateGRPCServer(t, s.grpcConn.Addr().String(), s.grpcServer) s.grpcConn.Close() // causes logged error - <-s.HealthCheckStatus() } func TestServerStart_BadPortErrors(t *testing.T) { @@ -130,6 +129,7 @@ func TestNewServer_TLSConfigError(t *testing.T) { storageMocks.factory, tenancy.NewManager(&tenancy.Options{}), zap.NewNop(), + healthcheck.New(), ) require.Error(t, err) assert.Contains(t, err.Error(), "invalid TLS config") @@ -294,9 +294,6 @@ type grpcClient struct { } func newGRPCClient(t *testing.T, addr string, creds credentials.TransportCredentials, tm *tenancy.Manager) *grpcClient { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - dialOpts := []grpc.DialOption{ grpc.WithUnaryInterceptor(tenancy.NewClientUnaryInterceptor(tm)), } @@ -306,7 +303,7 @@ func newGRPCClient(t *testing.T, addr string, creds credentials.TransportCredent dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) } - conn, err := grpc.DialContext(ctx, addr, dialOpts...) + conn, err := grpc.NewClient(addr, dialOpts...) 
require.NoError(t, err) return &grpcClient{ @@ -322,6 +319,8 @@ func TestServerGRPCTLS(t *testing.T) { GRPCHostPort: ":0", TLSGRPC: test.TLS, } + defer serverOptions.TLSGRPC.Close() + defer test.clientTLS.Close() flagsSvc := flags.NewService(ports.QueryAdminHTTP) flagsSvc.Logger = zap.NewNop() @@ -335,23 +334,11 @@ func TestServerGRPCTLS(t *testing.T) { storageMocks.factory, tm, flagsSvc.Logger, + flagsSvc.HC(), ) require.NoError(t, err) require.NoError(t, server.Start()) - var wg sync.WaitGroup - wg.Add(1) - once := sync.Once{} - - go func() { - for s := range server.HealthCheckStatus() { - flagsSvc.HC().Set(s) - if s == healthcheck.Unavailable { - once.Do(wg.Done) - } - } - }() - var clientError error var client *grpcClient @@ -378,7 +365,6 @@ func TestServerGRPCTLS(t *testing.T) { } require.NoError(t, client.conn.Close()) server.Close() - wg.Wait() assert.Equal(t, healthcheck.Unavailable, flagsSvc.HC().Get()) }) } @@ -395,10 +381,11 @@ func TestServerHandlesPortZero(t *testing.T) { storageMocks.factory, tenancy.NewManager(&tenancy.Options{}), flagsSvc.Logger, + flagsSvc.HC(), ) require.NoError(t, err) + require.NoError(t, server.Start()) - defer server.Close() const line = "Starting GRPC server" message := logs.FilterMessage(line) @@ -407,6 +394,10 @@ func TestServerHandlesPortZero(t *testing.T) { onlyEntry := message.All()[0] hostPort := onlyEntry.ContextMap()["addr"].(string) validateGRPCServer(t, hostPort, server.grpcServer) + + server.Close() + + assert.Equal(t, healthcheck.Unavailable, flagsSvc.HC().Get()) } func validateGRPCServer(t *testing.T, hostPort string, server *grpc.Server) { diff --git a/cmd/remote-storage/main.go b/cmd/remote-storage/main.go index a4f7759a3aa..51166025f6a 100644 --- a/cmd/remote-storage/main.go +++ b/cmd/remote-storage/main.go @@ -77,17 +77,11 @@ func main() { } tm := tenancy.NewManager(&opts.Tenancy) - server, err := app.NewServer(opts, storageFactory, tm, svc.Logger) + server, err := app.NewServer(opts, storageFactory, tm, svc.Logger, svc.HC()) if err != nil { logger.Fatal("Failed to create server", zap.Error(err)) } - go func() { - for s := range server.HealthCheckStatus() { - svc.SetHealthCheckStatus(s) - } - }() - if err := server.Start(); err != nil { logger.Fatal("Could not start servers", zap.Error(err)) } diff --git a/crossdock/rules.mk b/crossdock/rules.mk index 92d3652c8a8..8d4ca66fb3d 100644 --- a/crossdock/rules.mk +++ b/crossdock/rules.mk @@ -5,16 +5,15 @@ JAEGER_COLLECTOR_HC_PORT ?= 14269 .PHONY: crossdock crossdock: - docker-compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) kill - docker-compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) rm -f test_driver - JAEGER_COLLECTOR_HC_PORT=${JAEGER_COLLECTOR_HC_PORT} docker-compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) run crossdock 2>&1 | tee run-crossdock.log + docker compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) kill + docker compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) rm -f test_driver + JAEGER_COLLECTOR_HC_PORT=${JAEGER_COLLECTOR_HC_PORT} docker compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) run crossdock 2>&1 | tee run-crossdock.log grep 'Tests passed!' 
run-crossdock.log .PHONE: crossdock-logs crossdock-logs: - docker-compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) logs + docker compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) logs .PHONE: crossdock-clean crossdock-clean: - docker-compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) down - + docker compose -f $(JAEGER_COMPOSE_YAML) -f $(XDOCK_YAML) down diff --git a/docker-compose/kafka/README.md b/docker-compose/kafka/README.md index 0f18aad2e11..286b9f96a1d 100644 --- a/docker-compose/kafka/README.md +++ b/docker-compose/kafka/README.md @@ -1,6 +1,6 @@ # Sample configuration with Kafka -This `docker-compose` environment provides a sample configuration of Jaeger depoyment utilizing collector-Kafka-injester pipeline. Storage is provided by the `jageer-remote-storage` service running memstore. +This `docker compose` environment provides a sample configuration of a Jaeger deployment utilizing the collector-Kafka-ingester pipeline. Storage is provided by the `jaeger-remote-storage` service running memstore. Jaeger UI can be accessed at http://localhost:16686/, as usual, and refreshing the screen should produce internal traces. diff --git a/examples/grafana-integration/README.md b/examples/grafana-integration/README.md index 24b18f571e8..eef83392fff 100644 --- a/examples/grafana-integration/README.md +++ b/examples/grafana-integration/README.md @@ -2,7 +2,7 @@ This example combines the Hot R.O.D. demo application ([examples/hotrod/](../hotrod/)) with Grafana, Loki and Prometheus integration, to demonstrate logs, metrics and traces correlation. -## Running via `docker-compose` +## Running via `docker compose` ### Prerequisites @@ -20,7 +20,7 @@ grafana/loki-docker-driver:latest \ ### Run the services -`docker-compose up` +`docker compose up` ### Access the services * HotROD application at http://localhost:8080 @@ -52,4 +52,4 @@ Additionally, there are graphs for each service, visualizing the rate of the req ### Clean up -`docker-compose down` +`docker compose down` diff --git a/examples/hotrod/README.md b/examples/hotrod/README.md index b0dc9e8a305..f69fb49099b 100644 --- a/examples/hotrod/README.md +++ b/examples/hotrod/README.md @@ -1,8 +1,6 @@ # Hot R.O.D. - Rides on Demand -This is a demo application that consists of several microservices and illustrates -the use of the OpenTelemetry API & SDK. It can be run standalone, but requires Jaeger backend -to view the traces. A tutorial / walkthrough is available: +This is a demo application that consists of several microservices and illustrates the use of the OpenTelemetry API & SDK. It can be run standalone, but requires Jaeger backend to view the traces. A tutorial / walkthrough is available: * as a blog post [Take Jaeger for a HotROD ride][hotrod-tutorial], * as a video [OpenShift Commons Briefing: Distributed Tracing with Jaeger & Prometheus on Kubernetes][hotrod-openshift]. @@ -21,14 +19,14 @@ As of Jaeger v1.42.0 this application was upgraded to use the OpenTelemetry SDK ## Running -### Run everything via `docker-compose` +### Run everything via `docker compose` * Download `docker-compose.yml` from https://github.com/jaegertracing/jaeger/blob/main/examples/hotrod/docker-compose.yml * Optional: find the latest Jaeger version (see https://www.jaegertracing.io/download/) and pass it via environment variable `JAEGER_VERSION`.
Otherwise `docker compose` will use the `latest` tag, which is fine for the first time you download the images, but once they are in your local registry the `latest` tag is never updated and you may be running stale (and possibly incompatible) verions of Jaeger and the HotROD app. * Run Jaeger backend and HotROD demo, e.g.: - * `JAEGER_VERSION=1.52 docker-compose -f path-to-yml-file up` + * `JAEGER_VERSION=1.52 docker compose -f path-to-yml-file up` * Access Jaeger UI at http://localhost:16686 and HotROD app at http://localhost:8080 -* Shutdown / cleanup with `docker-compose -f path-to-yml-file down` +* Shutdown / cleanup with `docker compose -f path-to-yml-file down` Alternatively, you can run each component separately as described below. @@ -54,7 +52,7 @@ An all-in-one Jaeger backend is packaged as a Docker container with in-memory st docker run \ --rm \ --name jaeger \ - -p6831:6831/udp \ + -p4318:4318 \ -p16686:16686 \ -p14268:14268 \ jaegertracing/all-in-one:latest @@ -75,7 +73,7 @@ go run ./examples/hotrod/main.go all docker run \ --rm \ --link jaeger \ - --env OTEL_EXPORTER_JAEGER_ENDPOINT=http://jaeger:14268/api/traces \ + --env OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318 \ -p8080-8083:8080-8083 \ jaegertracing/example-hotrod:latest \ all diff --git a/examples/hotrod/services/driver/client.go b/examples/hotrod/services/driver/client.go index e40c8b8c40e..bc83cabbf92 100644 --- a/examples/hotrod/services/driver/client.go +++ b/examples/hotrod/services/driver/client.go @@ -36,7 +36,7 @@ type Client struct { // NewClient creates a new driver.Client func NewClient(tracerProvider trace.TracerProvider, logger log.Factory, hostPort string) *Client { - conn, err := grpc.Dial(hostPort, + conn, err := grpc.NewClient(hostPort, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider))), ) diff --git a/go.mod b/go.mod index cbb38192ffd..2e95435b8e5 100644 --- a/go.mod +++ b/go.mod @@ -5,12 +5,12 @@ go 1.21 require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 github.com/Shopify/sarama v1.37.2 - github.com/apache/thrift v0.19.0 + github.com/apache/thrift v0.20.0 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 github.com/bsm/sarama-cluster v2.1.13+incompatible github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b github.com/dgraph-io/badger/v3 v3.2103.5 - github.com/elastic/go-elasticsearch/v8 v8.12.0 + github.com/elastic/go-elasticsearch/v8 v8.13.1 github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/kit v0.13.0 github.com/go-logr/zapr v1.3.0 @@ -20,97 +20,97 @@ require ( github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/go-hclog v1.6.2 + github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-plugin v1.6.0 github.com/kr/pretty v0.3.1 github.com/olivere/elastic v6.2.37+incompatible - github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.94.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.94.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.94.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.94.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.94.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.94.0 - github.com/prometheus/client_golang 
v1.18.0 - github.com/prometheus/client_model v0.6.0 - github.com/prometheus/common v0.47.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0 + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.53.0 github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/xdg-go/scram v1.1.2 - go.opentelemetry.io/collector/component v0.94.1 - go.opentelemetry.io/collector/config/configgrpc v0.94.1 - go.opentelemetry.io/collector/config/confighttp v0.94.1 - go.opentelemetry.io/collector/config/configretry v0.94.1 - go.opentelemetry.io/collector/config/configtls v0.94.1 - go.opentelemetry.io/collector/connector v0.94.1 - go.opentelemetry.io/collector/connector/forwardconnector v0.94.1 - go.opentelemetry.io/collector/consumer v0.94.1 - go.opentelemetry.io/collector/exporter v0.94.1 - go.opentelemetry.io/collector/exporter/loggingexporter v0.94.1 - go.opentelemetry.io/collector/exporter/otlpexporter v0.94.1 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.94.1 - go.opentelemetry.io/collector/extension v0.94.1 - go.opentelemetry.io/collector/extension/ballastextension v0.94.1 - go.opentelemetry.io/collector/extension/zpagesextension v0.94.1 - go.opentelemetry.io/collector/otelcol v0.94.1 - go.opentelemetry.io/collector/pdata v1.1.0 - go.opentelemetry.io/collector/processor v0.94.1 - go.opentelemetry.io/collector/processor/batchprocessor v0.94.1 - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.94.1 - go.opentelemetry.io/collector/receiver v0.94.1 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.94.1 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 - go.opentelemetry.io/otel v1.23.1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1 - go.opentelemetry.io/otel/metric v1.23.1 - go.opentelemetry.io/otel/sdk v1.23.1 - go.opentelemetry.io/otel/trace v1.23.1 + go.opentelemetry.io/collector/component v0.98.0 + go.opentelemetry.io/collector/config/configgrpc v0.98.0 + go.opentelemetry.io/collector/config/confighttp v0.98.0 + go.opentelemetry.io/collector/config/configretry v0.98.0 + go.opentelemetry.io/collector/config/configtls v0.98.0 + go.opentelemetry.io/collector/confmap v0.98.0 + go.opentelemetry.io/collector/connector v0.98.0 + go.opentelemetry.io/collector/connector/forwardconnector v0.98.0 + go.opentelemetry.io/collector/consumer v0.98.0 + go.opentelemetry.io/collector/exporter v0.98.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0 + 
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0 + go.opentelemetry.io/collector/extension v0.98.0 + go.opentelemetry.io/collector/extension/ballastextension v0.98.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.98.0 + go.opentelemetry.io/collector/otelcol v0.98.0 + go.opentelemetry.io/collector/pdata v1.5.0 + go.opentelemetry.io/collector/processor v0.98.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0 + go.opentelemetry.io/collector/receiver v0.98.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 + go.opentelemetry.io/otel v1.26.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 + go.opentelemetry.io/otel/metric v1.26.0 + go.opentelemetry.io/otel/sdk v1.26.0 + go.opentelemetry.io/otel/trace v1.26.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 - go.uber.org/zap v1.26.0 - golang.org/x/net v0.21.0 - golang.org/x/sys v0.17.0 - google.golang.org/grpc v1.61.1 - google.golang.org/protobuf v1.32.0 + go.uber.org/zap v1.27.0 + golang.org/x/net v0.24.0 + golang.org/x/sys v0.19.0 + google.golang.org/grpc v1.63.2 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - github.com/IBM/sarama v1.42.1 // indirect + github.com/IBM/sarama v1.43.1 // indirect github.com/VividCortex/gohistogram v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.50.7 // indirect + github.com/aws/aws-sdk-go v1.51.17 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect - github.com/eapache/go-resiliency v1.4.0 // indirect + github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/elastic/elastic-transport-go/v8 v8.4.0 // indirect - github.com/fatih/color v1.14.1 // indirect + github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v1.12.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect + 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -128,17 +128,17 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.5 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.0.2 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect @@ -148,21 +148,22 @@ require ( github.com/mostynb/go-grpc-compression v1.2.2 // indirect github.com/oklog/run v1.1.0 // indirect github.com/onsi/ginkgo v1.16.5 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.94.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.94.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // 
indirect + github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/relvacode/iso8601 v1.4.0 // indirect @@ -171,12 +172,12 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/shirou/gopsutil/v3 v3.24.1 // indirect + github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tilinna/clock v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect @@ -184,41 +185,47 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.94.1 // indirect - go.opentelemetry.io/collector/config/configauth v0.94.1 // indirect - go.opentelemetry.io/collector/config/configcompression v0.94.1 // indirect - go.opentelemetry.io/collector/config/confignet v0.94.1 // indirect - go.opentelemetry.io/collector/config/configopaque v0.94.1 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.94.1 // indirect - go.opentelemetry.io/collector/config/internal v0.94.1 // indirect - go.opentelemetry.io/collector/confmap v0.94.1 // indirect - go.opentelemetry.io/collector/extension/auth v0.94.1 // indirect - go.opentelemetry.io/collector/featuregate v1.1.0 // indirect - go.opentelemetry.io/collector/semconv v0.94.1 // indirect - go.opentelemetry.io/collector/service v0.94.1 // indirect - go.opentelemetry.io/contrib/config v0.3.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.22.0 // indirect - go.opentelemetry.io/contrib/zpages v0.47.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v0.45.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.45.1 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.45.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.23.0 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect + go.opentelemetry.io/collector v0.98.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.98.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.5.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.98.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.5.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.98.0 // indirect + go.opentelemetry.io/collector/config/internal v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpprovider 
v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 // indirect + go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 // indirect + go.opentelemetry.io/collector/exporter/debugexporter v0.98.0 + go.opentelemetry.io/collector/extension/auth v0.98.0 // indirect + go.opentelemetry.io/collector/featuregate v1.5.0 // indirect + go.opentelemetry.io/collector/semconv v0.98.0 // indirect + go.opentelemetry.io/collector/service v0.98.0 // indirect + go.opentelemetry.io/contrib/config v0.4.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.25.0 // indirect + go.opentelemetry.io/contrib/zpages v0.50.0 // indirect + go.opentelemetry.io/otel/bridge/opencensus v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.47.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.25.0 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/text v0.14.0 // indirect - gonum.org/v1/gonum v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect + gonum.org/v1/gonum v0.15.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + gopkg.in/yaml.v3 v3.0.1 ) replace github.com/Shopify/sarama => github.com/Shopify/sarama v1.33.0 diff --git a/go.sum b/go.sum index 3bb20a57421..7cb8bdba8be 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4= -cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= -github.com/IBM/sarama v1.42.1/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= 
+github.com/IBM/sarama v1.43.1 h1:Z5uz65Px7f4DhI/jQqEm/tV9t8aU+JUdTyW/K/fCXpA= +github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.33.0 h1:2K4mB9M4fo46sAM7t6QTsmSO8dLX1OqznLM7vn3OjZ8= @@ -20,13 +15,13 @@ github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoH github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/apache/thrift v0.19.0 h1:sOqkWPzMj7w6XaYbJQG7m4sGqVolaW/0D28Ln7yPzMk= -github.com/apache/thrift v0.19.0/go.mod h1:SUALL216IiaOw2Oy+5Vs9lboJ/t9g40C+G07Dc0QC1I= +github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= +github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.50.7 h1:odKb+uneeGgF2jgAerKjFzpljiyZxleV4SHB7oBK+YA= -github.com/aws/aws-sdk-go v1.50.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.17 h1:Cfa40lCdjv9OxC3X1Ks3a6O1Tu3gOANSyKHOSw/zuWU= +github.com/aws/aws-sdk-go v1.51.17/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -38,18 +33,16 @@ github.com/bsm/sarama-cluster v2.1.13+incompatible h1:bqU3gMJbWZVxLZ9PGWVKP05yOm github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 
v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -73,26 +66,24 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.4.0 h1:3OK9bWpPk5q6pbFAaYSEwD9CLUSHG8bnZuqX2yMt3B0= -github.com/eapache/go-resiliency v1.4.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elastic/elastic-transport-go/v8 v8.4.0 h1:EKYiH8CHd33BmMna2Bos1rDNMM89+hdgcymI+KzJCGE= -github.com/elastic/elastic-transport-go/v8 v8.4.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= -github.com/elastic/go-elasticsearch/v8 v8.12.0 h1:krkiCf4peJa7bZwGegy01b5xWWaYpik78wvisTeRO1U= -github.com/elastic/go-elasticsearch/v8 v8.12.0/go.mod h1:wSzJYrrKPZQ8qPuqAqc6KMR4HrBfHnZORvyL+FMFqq0= +github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= +github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= +github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -132,8 +123,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -151,9 +142,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -166,7 +156,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -185,15 +174,15 @@ 
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= @@ -244,14 +233,14 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E= -github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.0.2 h1:sEZzPW2rVWSahcYILNq/syJdEyRafZIG0l9aWwL86HA= -github.com/knadh/koanf/v2 v2.0.2/go.mod h1:HN9uZ+qFAejH1e4G41gnoffIanINWQuONLXiV7kir6k= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -264,8 +253,9 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -278,8 +268,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -315,34 +305,36 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.94.0 h1:yG2Y7AAvruaY0AV5/2vdQ0Cbiq4849hPm3OClQGfYOM= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.94.0/go.mod h1:hhSpAfse5IQTj+o/ZGhjlblDazOYOOCnYFnp2O2Geyg= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.94.0 h1:4rhjPt3FZS9XLGYMuVwAj52O24HseralRIgbuA3TWwk= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.94.0/go.mod h1:fokDEADqVxQXAnEUslYRJ15oCrDfNUcT12Xo0sI6cCI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.94.0 h1:geXLFcp9EFJ46XW3OGey10voO2FEv8hB41ttHCxQBFA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.94.0/go.mod h1:2iqSk/ZjqmRsHdISoVnnKMLRtlpPWWEHaRTY9H3Lqjc= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.94.0 
h1:0X1r/YZR+rbQqzKzQ40LTZUw1a8mKXV/BmO+BDuhx1w= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.94.0/go.mod h1:VR/KC70d5Jxfw71448BpbykhTK6+0bLjTgPobcm+7Vk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.94.0 h1:JzijP0/OfYu2IN5UMNjdFuyGAuKu20XkJMbEZ0ZK9yA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.94.0/go.mod h1:OlZPu+ys9wUSZA1fIdIQvQct9djlGFnaAyDxqdu/LvY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.94.0 h1:T6p+BzzJrBCwd7oIQ+LM+0ta2bPTt1ktvUUFcDNLBdw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.94.0/go.mod h1:VZgNEM8EbzejKEL+zNWYyA7I2OICdVLVT4cz1BE8clQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.94.0 h1:nTayRLarCGkB9ld7p8jWJe/9wvf8gNDaS5fRjybkEpg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.94.0/go.mod h1:xoBvqu56hbky3KZafo68nxtV2+J81+pvo1ttNirakcU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.94.0 h1:DSGhzGAaC767esMB0Ulr+9xWe6SW0LFUYMxLrLOAkjM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.94.0/go.mod h1:Nv4nK3E7sUpDbNv0zI0zY15g2xR4jMg+n8taV8dsMeE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.94.0 h1:c3XyFv8aQlmhCoggTTJQvXNCbztmlJUqkMsRs1qmfMs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.94.0/go.mod h1:sai2fTcp3A4+dbSKFwLM7LS1WDu/fmA86hkEfQpPIpI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.94.0 h1:+gpgiUXzrx8YeVBfXPwxpfLrB6FQgTnfXp2HHx2LumI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.94.0/go.mod h1:73syuA2KFBpSVI9xIYXjMItqcfuR9aKbb5ffg3WgZjo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.94.0 h1:xmVJ8kSCUAywtBUBLWbnVEsQzkKSCaH+/zu7xtIHMQk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.94.0/go.mod h1:++t4Bpe2MubDNjVeZ1DkaeNcFt0f8jixKWraZeAUrHE= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.94.0 h1:BlVcB4XE8mkov0Gv8RJyryKihyadJsq9P/t5r8k5RK8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.94.0/go.mod h1:NPYTGHnJlzmfNhbds2RaeSQXkVYMOFXU3xTCr08060o= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.94.0 h1:zVnSettpsS/WRK8cgfOG9tneoK7701I6LH3FTJpMjzw= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.94.0/go.mod h1:8YE6DNBFWWVC+W5XSTw1EfwzJ5E2pQHtS3/zWBMKnPw= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.94.0 h1:WjWN2v68vLHsHQBB4xXDtynWfA6ppPlTWLHtFxAGhBo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.94.0/go.mod h1:ScwmnXQID7bOlsoi8zCt4hFEvYku6DTxr7fxgZWPIeY= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.98.0 h1:0G3g+C1ASOD7hjyR0XSU0A64iQ2UhoqKJvI/yl+xStA= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.98.0/go.mod h1:ea51nQ+s7RiaPfVWfb+ZpqHA5wpC3Zc6C27ZvEXCJBg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0 h1:qqbNZig9IqIed6mj9FUJVWabiP+mxY09vF+aW/hX2cU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0/go.mod h1:hYMt6wWecJJF60oWxRvXgA2LU207PwhMEvVg+/yxBvE= 
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0 h1:yend0fdg/ejfVSFOCI8CLo5ikkNhSl41Zs6ma5jUZ4c= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0/go.mod h1:yGkFJl78686wAA5235HdLLQrWlOxuNqnZzQMUz2I7Ac= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 h1:rbfZJ4YA82WSC9HW2jgmY1UZEsOYte8VTjrBFU6dv5Y= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0/go.mod h1:KOTp7iN8sWTmqk/SxbMqvM2KXUdgXRWb6hI84E5arLY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 h1:FaldDCQ6hpPAauYZ1kbNWkTFU2vRgL/nr5UY8d2jrT4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0/go.mod h1:0arlQ0mj/VhcFFSKHDmIc+iieHweXKENSBcqNnAY8OA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0 h1:G4VY01P5r26yAM+ruHnLBTpue1naxBHSSCEYbMzbbUo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0/go.mod h1:eY3VRXSzUIUSyjsxuKdY6XTzV0oYlge0MMLw7ijd/RM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0 h1:3JjVzPccHi6r4XPgzH5YIi0/CGHbqVJWADtYi46GfIY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0/go.mod h1:90/iHUjO/6DGVa161GDsdbntj1r06DRS8ZQBvw+PNOY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0 h1:lZGvpoHCYoEFBDLhnKGGAgGJyX2l2jGZUgC1kTz/esI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0/go.mod h1:fOHCXaWnVcKJPY9Jxeoyl7tSuuhZ/PPN92ib8eyjSLQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 h1:F1Vnzkhg0Trncmvw65Mq1wBKPgX2wI3aGE90J7CXhuU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0/go.mod h1:yybbWtN2RnnIoqw3KzboFjk4iYpEBXPzHQvARs+eO9U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0 h1:jINjBgpWO/rU6RKzeHzfJAJCTfqGJcBGBgjwwTiqZBE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0/go.mod h1:3zw5nWtjNyojk1rOPfJ3TTNNPfRUqEUgJUbytsS3f5c= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0 h1:lSP0bTkEMMFa5DeA0QLwgxA7zAIbq2NeLRioAS4qKVE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0/go.mod h1:ErAJvPIFFMeb9nL1ePsYbIK18BaVyitIaz3zAlA+msY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0 h1:SRFZwjtkjlh4thYckLGPyKnlJPxWhpYAdMlwrT7iho0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0/go.mod h1:CgCTyjyZCHfm+nci5W1gqL55imJQXjuETC5fzg54Pus= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0 h1:ct+a4NWbBgM00d1uxJ+8b3adX+2U6sTuAwpFspMuIp4= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0/go.mod h1:mHVzbFtTDv4AaQ18vWP7rJ4OJjPfoi3TQSrDmOyuhDo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0 h1:4B13z8toU1BKf86cNmqvfYzs3ipXB73zrwaC/Tg/6t4= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0/go.mod h1:DoJc8YvhFDiQ2vFP5SyN5Rtwxvht0Lb3cLc6D+ugjvI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0 h1:2VAQfI6o8zunjLzkOZ7Bg9N+annovyIcRjz5Eja8ra0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0/go.mod 
h1:ci1rco0CWHtTwJZIk8wlyX3tHoUOEUuqwPHZkNPBaUQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -353,8 +345,8 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= -github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -362,17 +354,18 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= -github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= -github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 
h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= @@ -393,8 +386,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= -github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= +github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE= +github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -428,8 +421,9 @@ github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMV github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -440,8 +434,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= @@ -468,119 +463,133 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.94.1 h1:bGHW5NKmh34oMflMEyNCHpes6vtiQNXpgea4GiscAOs= -go.opentelemetry.io/collector v0.94.1/go.mod h1:5ACZXRo6O23gBkRrHSxYs1sLaP4pZ8w+flZNE7pvoNg= -go.opentelemetry.io/collector/component v0.94.1 h1:j4peKsWb+QVBKPs2RJeIj5EoQW7yp2ZVGrd8Bu9HU9M= -go.opentelemetry.io/collector/component v0.94.1/go.mod h1:vg+kAH81C3YS0SPzUXkSFWLPC1WH7zx70dAtUWWIHcE= -go.opentelemetry.io/collector/config/configauth v0.94.1 h1:VwmxDZRZVihvfwlP+BIM4DHVYqwx3oFUl6Fat1u3UnM= -go.opentelemetry.io/collector/config/configauth v0.94.1/go.mod h1:AgftrmblBdbNE6T3P7EkCHdO/Ibh/AvPwCy3OR0ZftA= -go.opentelemetry.io/collector/config/configcompression v0.94.1 h1:uZ+uh+Ods+FTIaXmf2T9Euq0srCHa7ChPApuwP/Dd98= -go.opentelemetry.io/collector/config/configcompression v0.94.1/go.mod h1:fA36AZC/Qcyl+HvMnvFZuV/iUWGQJrchimmk+qYWuMM= -go.opentelemetry.io/collector/config/configgrpc v0.94.1 h1:Eh8hd6hJbCAxqLd301n4LrOiE1pN+0CXrk24r7+Nnh8= -go.opentelemetry.io/collector/config/configgrpc v0.94.1/go.mod h1:oDake34+Y5i37W5WpnRhc5nenkyEJwjaDbBYp28EqYg= -go.opentelemetry.io/collector/config/confighttp v0.94.1 h1:2G/bOzREhgIEoKGLE3tOeuiAjveB4o24n/oaIm3E5Rs= -go.opentelemetry.io/collector/config/confighttp v0.94.1/go.mod h1:zJopIf0pWZtczVf6k7FECHkH4QVZJ75I1Lq1PmWZfNQ= -go.opentelemetry.io/collector/config/confignet v0.94.1 h1:kaV1iwZKjv7ZJZ+PtLTQYfVNLVD3EAyxfYBNZf5GXmo= -go.opentelemetry.io/collector/config/confignet v0.94.1/go.mod h1:rraribsOoZsYZm51+3TXHavmXKJOC5a5/X20vfmNbdw= -go.opentelemetry.io/collector/config/configopaque v0.94.1 h1:Jq0pF0DPaFnBNKrBN9V2AV0Yk5T+YTm9pYhmG/ipZJs= -go.opentelemetry.io/collector/config/configopaque v0.94.1/go.mod h1:3T6t2PN2/Tl1122CYO+isudwCbSuElqXG/yqRh+SX8U= -go.opentelemetry.io/collector/config/configretry v0.94.1 h1:0rJXulYg7DouKfrfyhNgT2SyDtTx2+PSQATuHQK6kLU= -go.opentelemetry.io/collector/config/configretry v0.94.1/go.mod h1:gt1HRYyMxcMca9lbDLPbivQzsUCjVjkPAn/3S6fiD14= -go.opentelemetry.io/collector/config/configtelemetry v0.94.1 h1:ztYpBEBlvhcoxMiDKNmQ2SS+A41JZ4M19GfcxjCo8Zs= -go.opentelemetry.io/collector/config/configtelemetry v0.94.1/go.mod h1:2XLhyR/GVpWeZ2K044vCmrvH/d4Ewt0aD/y46avZyMU= -go.opentelemetry.io/collector/config/configtls v0.94.1 h1:potTWvF7ssMva7MlrrbXBB4UhXYh9Fg8TZp1noLEPpo= -go.opentelemetry.io/collector/config/configtls v0.94.1/go.mod h1:8/27C8CKCyZTb9T1Y9972BEDr7iPFc7u3H1/GSC4lYU= -go.opentelemetry.io/collector/config/internal v0.94.1 h1:qmGX6ZGYmGOiDa5kDBCmCEnd+qZO3uOwzzcQxtQRnvs= -go.opentelemetry.io/collector/config/internal v0.94.1/go.mod h1:Dj6qq+HPOIUbd2EsQgIsSA5uDiXOLc4DI6nZ3NcDGkg= -go.opentelemetry.io/collector/confmap v0.94.1 h1:O69bkeyR1YPAFz+jMd45aDZc1DtYnwb3Skgr2yALPqQ= -go.opentelemetry.io/collector/confmap v0.94.1/go.mod 
h1:pCT5UtcHaHVJ5BIILv1Z2VQyjZzmT9uTdBmC9+Z0AgA= -go.opentelemetry.io/collector/connector v0.94.1 h1:ZYGNubGypsxK5XN6rkCdMaS0PDnMW7yzOj4CHUNIAII= -go.opentelemetry.io/collector/connector v0.94.1/go.mod h1:iv4lgIGa15FDwz7UN/pBMtrihTJEsZUxbWfPbM7e2QM= -go.opentelemetry.io/collector/connector/forwardconnector v0.94.1 h1:a8v3vhGpJQm4jd70Yo0LkTIoSB4CFZzpxTP3fkgRwOc= -go.opentelemetry.io/collector/connector/forwardconnector v0.94.1/go.mod h1:cCj8IR0tzvpbz33qvNZP3TvYUzEDKF97FkpwN7+k+Oo= -go.opentelemetry.io/collector/consumer v0.94.1 h1:l/9h5L71xr/d93snQ9fdxgz64C4UuB8mEDxpp456X8o= -go.opentelemetry.io/collector/consumer v0.94.1/go.mod h1:BIPWmw8wES6jlPTPC+acJxLvUzIdOm6uh/p/X85ALsY= -go.opentelemetry.io/collector/exporter v0.94.1 h1:tu9l/lZdgf0zLvWTZeRPV6wKLkQ8ymMFx7GBGjweQtw= -go.opentelemetry.io/collector/exporter v0.94.1/go.mod h1:XO3dwIIjrHTu0Z9Fs0pQASFTNZcT7uQiYd78f49gNsk= -go.opentelemetry.io/collector/exporter/loggingexporter v0.94.1 h1:11b+RGJ2YUGlHTTIBqTujL052jRX7VSdZAq+xeSB0Pg= -go.opentelemetry.io/collector/exporter/loggingexporter v0.94.1/go.mod h1:m0Rjlm7A9NFn87qvtWyad2BI9GVeNA/5SaZuW2t2eGQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.94.1 h1:0GLWchZb+2Lv3wCIjypKvqk66BXPuh0PomW310c3mNY= -go.opentelemetry.io/collector/exporter/otlpexporter v0.94.1/go.mod h1:al1hQR6yJ/XWpaC+nuuTFdmOkDA+OmGL/jYExd6NOxc= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.94.1 h1:RxnMLsNJ29isZclfMgxy7rEkma8hZUMc2gFXLMqRFUU= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.94.1/go.mod h1:kD3zLaplARCcpobVkuJB6WUp+nV4i2xJxJ9H8vL+uUo= -go.opentelemetry.io/collector/extension v0.94.1 h1:f0yyW2lmLg+PI1FjNWJaGcKVQV6TRgLqqbMA/4S5dA4= -go.opentelemetry.io/collector/extension v0.94.1/go.mod h1:fxQXkLkFcea3uJ3hlImBs5kQ/pWjeDIC2OylnDYIA4g= -go.opentelemetry.io/collector/extension/auth v0.94.1 h1:r9YPcI5KpSzMS4/JEWfRJGMQTnToWb+/tT/WZxXRIJk= -go.opentelemetry.io/collector/extension/auth v0.94.1/go.mod h1:r/C9S2Y+s+9Ter7VnGFuo9QOMIWOPuTBp0I6lQInWgg= -go.opentelemetry.io/collector/extension/ballastextension v0.94.1 h1:4MyvV2K9YOKRoBE6TGt8ydSeu9Uaf1HgqL2GgkuJjjI= -go.opentelemetry.io/collector/extension/ballastextension v0.94.1/go.mod h1:W7T5jBE9mblTNOaKZX/AWMNPdIl2yl++DXNrNINmYU0= -go.opentelemetry.io/collector/extension/zpagesextension v0.94.1 h1:s+cb8nh2vS9F2/UkWO+jhsvJE2SV5CF3BZ1KJ6q8rSk= -go.opentelemetry.io/collector/extension/zpagesextension v0.94.1/go.mod h1:NVoJq0mxe7AIw+EjDPyrwMZ21DGxoUqXZ1W5dTXrV38= -go.opentelemetry.io/collector/featuregate v1.1.0 h1:W+/FKvRxHMFC6MuTTEgrHINCf1vFBvLH7stSOEar6zU= -go.opentelemetry.io/collector/featuregate v1.1.0/go.mod h1:QQXjP4etmJQhkQ20j4P/rapWuItYxoFozg/iIwuKnYg= -go.opentelemetry.io/collector/otelcol v0.94.1 h1:iXCIjOxjAHiMtguDz8JK7lGMkvKRrretnJ+hbuimYd8= -go.opentelemetry.io/collector/otelcol v0.94.1/go.mod h1:/cYiy1apIC+04ij+miTGUjm2Qc23oq/6KUcBlCeeBEw= -go.opentelemetry.io/collector/pdata v1.1.0 h1:cE6Al1rQieUjMHro6p6cKwcu3sjHXGG59BZ3kRVUvsM= -go.opentelemetry.io/collector/pdata v1.1.0/go.mod h1:IDkDj+B4Fp4wWOclBELN97zcb98HugJ8Q2gA4ZFsN8Q= -go.opentelemetry.io/collector/processor v0.94.1 h1:cNlGox8fN85KhtUq6yuqgPM9KDCQ4O5aDQ864joc4JQ= -go.opentelemetry.io/collector/processor v0.94.1/go.mod h1:pMwIDr5UTSjBJ8ATLR8e84TWEnqO/9HTmDjj1NJ3K84= -go.opentelemetry.io/collector/processor/batchprocessor v0.94.1 h1:FFS9N2qLcWMvLYtft/jdjCGKpGD7ijv3v7w3TneOefU= -go.opentelemetry.io/collector/processor/batchprocessor v0.94.1/go.mod h1:Nu1f6XsSN1XN+o5ay4j3quKm38BqsWG2+ACvPs763ac= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.94.1 
h1:APMh49kFsl/Y3io1edwKc/X4EOswGTe+r1b/66oP2dY= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.94.1/go.mod h1:lOf71i/eIJPyAlm4DwXlcHMA4kEQ9rLj5Wl2Q+j9vTc= -go.opentelemetry.io/collector/receiver v0.94.1 h1:p9kIPmDeLSAlFZZuHdFELGGiP0JduFEfsT8Uz6Ut+8g= -go.opentelemetry.io/collector/receiver v0.94.1/go.mod h1:AYdIg3Bl4kwiqQy/k3tuYQnS918gb5i3HcInn6owudE= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.94.1 h1:5fQp5ahVApc/XcMVtIJWS9qh5GEabIytgNQRccMVruY= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.94.1/go.mod h1:4xCG73Edl33quyPPQuQQnk2fB7C9LGyT9vm1CxhhTe0= -go.opentelemetry.io/collector/semconv v0.94.1 h1:+FoBlzwFgwalgbdBhJHtHPvR7W0+aJDUAdQdsmfT/Ts= -go.opentelemetry.io/collector/semconv v0.94.1/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo= -go.opentelemetry.io/collector/service v0.94.1 h1:O2n+j22ycTi5cikDehYlYKw2VslCbcwjX8Pgf5NeVoc= -go.opentelemetry.io/collector/service v0.94.1/go.mod h1:Lq55nShtnd7y2iZAXW1DIO+gmGYgSdbju+ESL+NnWZg= -go.opentelemetry.io/contrib/config v0.3.0 h1:nJxYSB7/8fckSya4EAFyFGxIytMvNlQInXSmhz/OKKg= -go.opentelemetry.io/contrib/config v0.3.0/go.mod h1:tQW0mY8be9/LGikwZNYno97PleUhF/lMal9xJ1TC2vo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/contrib/propagators/b3 v1.22.0 h1:Okbgv0pWHMQq+mF7H2o1mucJ5PvxKFq2c8cyqoXfeaQ= -go.opentelemetry.io/contrib/propagators/b3 v1.22.0/go.mod h1:N3z0ycFRhsVZ+tG/uavMxHvOvFE95QM6gwW1zSqT9dQ= -go.opentelemetry.io/contrib/zpages v0.47.0 h1:ekpdNa2wqOvAfwZIGDIIV02zmR+z08aWPt21KrPJnaU= -go.opentelemetry.io/contrib/zpages v0.47.0/go.mod h1:rBeFA/UxnMjRlEGpmClIqzf1mCIKtl7ahjww3wsSdGs= -go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/bridge/opencensus v0.45.0 h1:kEOlv9Exuv3J8GCf1nLMHfrTPGnZOuIkN8YlRM14TtQ= -go.opentelemetry.io/otel/bridge/opencensus v0.45.0/go.mod h1:tkVMJeFOr43+zzwbxtIWsNcCCDT7rI5/c9rhMfMIENg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 h1:tfil6di0PoNV7FZdsCS7A5izZoVVQ7AuXtyekbOpG/I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0/go.mod h1:AKFZIEPOnqB00P63bTjOiah4ZTaRzl1TKwUWpZdYUHI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 h1:+RbSCde0ERway5FwKvXR3aRJIFeDu9rtwC6E7BC6uoM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0/go.mod h1:zcI8u2EJxbLPyoZ3SkVAAcQPgYb1TDRzW93xLFnsggU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1 h1:o8iWeVFa1BcLtVEV0LzrCxV2/55tB3xLxADr6Kyoey4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.23.1/go.mod h1:SEVfdK4IoBnbT2FXNM/k8yC08MrfbhWk3U4ljM8B3HE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1 h1:p3A5+f5l9e/kuEBwLOrnpkIDHQFlHmbiVxMURWRK6gQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.23.1/go.mod h1:OClrnXUjBqQbInvjJFjYSnMxBSCXBF8r3b34WqjiIrQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1 
h1:cfuy3bXmLJS7M1RZmAL6SuhGtKUp2KEsrm00OlAXkq4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.23.1/go.mod h1:22jr92C6KwlwItJmQzfixzQM3oyyuYLCfHiMY+rpsPU= -go.opentelemetry.io/otel/exporters/prometheus v0.45.1 h1:R/bW3afad6q6VGU+MFYpnEdo0stEARMCdhWu6+JI6aI= -go.opentelemetry.io/otel/exporters/prometheus v0.45.1/go.mod h1:wnHAfKRav5Dfp4iZhyWZ7SzQfT+rDZpEpYG7To+qJ1k= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.45.0 h1:NjN6zc7Mwy9torqa3mo+pMJ3mHoPI0uzVSYcqB2t72A= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.45.0/go.mod h1:U+T5v2bk4fCC8XdSEWZja3Pm/ZhvV/zE7JwX/ELJKts= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1 h1:IqmsDcJnxQSs6W+1TMSqpYO7VY4ZuEKJGYlSBPUlT1s= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.23.1/go.mod h1:VMZ84RYOd4Lrp0+09mckDvqBj2PXWDwOFaxb1P5uO8g= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/sdk v1.23.1 h1:O7JmZw0h76if63LQdsBMKQDWNb5oEcOThG9IrxscV+E= -go.opentelemetry.io/otel/sdk v1.23.1/go.mod h1:LzdEVR5am1uKOOwfBWFef2DCi1nu3SA8XQxx2IerWFk= -go.opentelemetry.io/otel/sdk/metric v1.23.0 h1:u81lMvmK6GMgN4Fty7K7S6cSKOZhMKJMK2TB+KaTs0I= -go.opentelemetry.io/otel/sdk/metric v1.23.0/go.mod h1:2LUOToN/FdX6wtfpHybOnCZjoZ6ViYajJYMiJ1LKDtQ= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.opentelemetry.io/collector v0.98.0 h1:O7bpARGWzNfFQEYevLl4iigDrpGTJY3vV/kKqNZzMOk= +go.opentelemetry.io/collector v0.98.0/go.mod h1:fvPM+tBML07uvAP1MV2msYPSYJ9U/lgE1jDb3AFBaMM= +go.opentelemetry.io/collector/component v0.98.0 h1:0TMaBOyCdABiVLFdGOgG8zd/1IeGldCinYonbY08xWk= +go.opentelemetry.io/collector/component v0.98.0/go.mod h1:F6zyQLsoExl6r2q6WWZm8rmSSALbwG2zwIHLrMzZVio= +go.opentelemetry.io/collector/config/configauth v0.98.0 h1:FPffZ1dRL6emStrDUEGpL0rCChbUZNAQgpArXD0SESI= +go.opentelemetry.io/collector/config/configauth v0.98.0/go.mod h1:5pMzf2zgFwS7tujNq0AtOOli5vxIvnrNi7JlZwrBOFo= +go.opentelemetry.io/collector/config/configcompression v1.5.0 h1:FTxKbFPN4LznRCH/GQ+b+0tAWmg80Y2eEka79S2sLZ0= +go.opentelemetry.io/collector/config/configcompression v1.5.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M= +go.opentelemetry.io/collector/config/configgrpc v0.98.0 h1:4yP/TphwQnbgLpJ72NymXaERVjLjuDAQp4iDKCTcv5g= +go.opentelemetry.io/collector/config/configgrpc v0.98.0/go.mod h1:tIng0xx1XlVr4I0YG5bNpts0hZDjwzN3Jkz6cKaSH/s= +go.opentelemetry.io/collector/config/confighttp v0.98.0 h1:pW7gR34TTXcrCHJgemL6A4VBVBS2NyDAkruSMvQj1Vo= +go.opentelemetry.io/collector/config/confighttp v0.98.0/go.mod h1:M9PMtiKrTJMG8i3SqJ+AUVKhR6sa3G/8S2F1+Dxkkr0= +go.opentelemetry.io/collector/config/confignet v0.98.0 h1:pXDBb2hFe10T/NMHlL/oMgk1aFfe4NmmJFdFoioyC9o= +go.opentelemetry.io/collector/config/confignet v0.98.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k= +go.opentelemetry.io/collector/config/configopaque v1.5.0 h1:WJzgmsFU2v63BypPBNGL31ACwWn6PwumPJNpLZplcdE= +go.opentelemetry.io/collector/config/configopaque v1.5.0/go.mod h1:/otnfj2E8r5EfaAdNV4qHkTclmiBCZXaahV5EcLwT7k= +go.opentelemetry.io/collector/config/configretry v0.98.0 
h1:gZRenX9oMLJmQ/CD8YwFNl9YYl68RtcD0RYSCJhrMAk= +go.opentelemetry.io/collector/config/configretry v0.98.0/go.mod h1:uRdmPeCkrW9Zsadh2WEbQ1AGXGYJ02vCfmmT+0g69nY= +go.opentelemetry.io/collector/config/configtelemetry v0.98.0 h1:f8RNZ1l/kYPPoxFmKKvTUli8iON7CMsm85KM38PVNts= +go.opentelemetry.io/collector/config/configtelemetry v0.98.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/config/configtls v0.98.0 h1:g+MADy01ge8iGC6v2tbJ5G27CWNG1BaJtmYdmpvm8e4= +go.opentelemetry.io/collector/config/configtls v0.98.0/go.mod h1:9RHArziz0mNEEkti0kz5LIdvbQGT7/Unu/0whKKazHQ= +go.opentelemetry.io/collector/config/internal v0.98.0 h1:wz/6ncawMX5cfIiXJEYSUm1g1U6iE/VxFRm4/WhVBPI= +go.opentelemetry.io/collector/config/internal v0.98.0/go.mod h1:xPnEE6QaTSXr+ctYMSTBxI2qwTntTUM4cYk7OTm6Ugc= +go.opentelemetry.io/collector/confmap v0.98.0 h1:qQreBlrqio1y7uhrAvr+W86YbQ6fw7StgkbYpvJ2vVc= +go.opentelemetry.io/collector/confmap v0.98.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0 h1:lRhfcLr3gK5S/zn92h3clyOPnCvvNKs1WTMbtH4UvO0= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0/go.mod h1:vNMFTWe4dF05LsodUOc84OfxdlYVp1kCMuZzb41WfAk= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0 h1:x/VsGlBj+DtJCXIucwzwcxiwnwAU8a6ALK6UN8fPdKQ= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0/go.mod h1:BapTGXu7CYrQGNohbapPwTSt2Ty/k/c6Oemx9mSSiK4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0 h1:SxDS+Yr8qE+ID58ELR5n0D+SUlqHKOZ72pK3YPFAelA= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0/go.mod h1:DEoB0d0k1iGt4KEABntL8AW9xYQ6E7fmgkM2/s8aXvM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0 h1:C02SPbRPvrtmZ9TvsHWpz2TvHzqY5mNyEAlDdhax/a4= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0/go.mod h1:dzZKtykJio3Rm+G+Cmr15VV3xKp0PmFuh9Q9b3c1K7A= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 h1:04zGXVQZ8D6nvoPX8AaqxWxGHNNVsGR78E+tY+2VQr8= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0/go.mod h1:+UrRiugWaQPssz4mgEgQQo640f2bDUCFlo2Xr0/5ulc= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 h1:JYpDN0OnMsu0awk0rjaYEIko9hFzzBJ6+2U5W2iVvUE= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0/go.mod h1:xrXL804nBum1PgbvmJQ4I+hyW+DU4xBGO3MKMiYFX6E= +go.opentelemetry.io/collector/connector v0.98.0 h1:1ifadXqOtB5bZ+OocLVlzF0zltWjP70E3+xYt2fJnMg= +go.opentelemetry.io/collector/connector v0.98.0/go.mod h1:OFii9qa2ZgktI61/r0gWDsGjXtpEe+qXC8+0o4ZySeA= +go.opentelemetry.io/collector/connector/forwardconnector v0.98.0 h1:zIn0GaqUKq+3GcvfdVEIB/GSv7Zdtda8q0XChXG6Qj0= +go.opentelemetry.io/collector/connector/forwardconnector v0.98.0/go.mod h1:eC6PfRbK0Mx7QpqfnEI0uPAjq27MR//sRb5Vxzuf6eE= +go.opentelemetry.io/collector/consumer v0.98.0 h1:47zJ5HFKXVA0RciuwkZnPU5W8j0TYUxToB1/zzzgEhs= +go.opentelemetry.io/collector/consumer v0.98.0/go.mod h1:c2edTq38uVJET/NE6VV7/Qpyznnlz8b6VE7J6TXD57c= +go.opentelemetry.io/collector/exporter v0.98.0 h1:eN2qtkiwpeX9gBu9JZw1k/CZ3N9wZE1aGJ1A0EvwJ7w= +go.opentelemetry.io/collector/exporter v0.98.0/go.mod h1:GCW46a0VAuW7nljlW//GgFXI+8mSrJjrdEKVO9icExE= +go.opentelemetry.io/collector/exporter/debugexporter v0.98.0 h1:zAFCXYzpxDh64BIVujqntlN5RE4jDahbPfJBy/Wq7D0= +go.opentelemetry.io/collector/exporter/debugexporter v0.98.0/go.mod h1:S15ELDQFoP5da3NHzXJR2I8FGUnM2C1Hp6e/OhaCtw0= 
+go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0 h1:uhiR/luaJCwMnvvkIS/gIxBbSAp+/vbqeC3AXmuc/kg= +go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0/go.mod h1:1ySnK/6Cl+67FTP6ty04PX9nrXPYFPuBqZ+Xn9Jzz6Y= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0 h1:+6mRqTgoJxXxuPwI8s5fMKm0mLfwVwJgD2EB7gUNNlE= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0/go.mod h1:uGocxqpbUrZDwZz6JBKsvNCyDLrS/pnVpn4BUuPauFw= +go.opentelemetry.io/collector/extension v0.98.0 h1:08B5ipEsoNmPHY96j5EUsUrFre01GOZ4zgttUDtPUkY= +go.opentelemetry.io/collector/extension v0.98.0/go.mod h1:fZ1Hnnahszl5j3xcW2sMRJ0FLWDOFkFMQeVDP0Se7i8= +go.opentelemetry.io/collector/extension/auth v0.98.0 h1:7b1jioijJbTMqaOCrz5Hoqf+zJn2iPlGmtN7pXLNWbA= +go.opentelemetry.io/collector/extension/auth v0.98.0/go.mod h1:gssWC4AxAwAEKI2CqS93lhjWffsVdzD8q7UGL6LaRr0= +go.opentelemetry.io/collector/extension/ballastextension v0.98.0 h1:EPzsYpiSY4vAfzJMqhVK6bIh+qZRmXVskaNlRFKjA0w= +go.opentelemetry.io/collector/extension/ballastextension v0.98.0/go.mod h1:IY/JNP0g+tUUe/w5YHgBYwv5XlH4eqo5d4th+RGROFU= +go.opentelemetry.io/collector/extension/zpagesextension v0.98.0 h1:JfvsDpTwAhA9au8/4vmONRh0OBVU6n36seb41JD/mTQ= +go.opentelemetry.io/collector/extension/zpagesextension v0.98.0/go.mod h1:t1zDwy6kYp4w1JgcGHMvdGbKYHqWpK00bB1AEQ0Oqlc= +go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM= +go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= +go.opentelemetry.io/collector/otelcol v0.98.0 h1:sUk49Wqw+VBYeDynEA+GSeVEusFvzFW3KuF2vfDbyo0= +go.opentelemetry.io/collector/otelcol v0.98.0/go.mod h1:dW3UzuaiaNTddjKajk3Tp2Y7muDvYJdQz2yGUOE53gs= +go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE= +go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw= +go.opentelemetry.io/collector/pdata/testdata v0.98.0 h1:8gohV+LFXqMzuDwfOOQy9GcZBOX0C9xGoQkoeXFTzmI= +go.opentelemetry.io/collector/pdata/testdata v0.98.0/go.mod h1:B/IaHcf6+RtxI292CZu9TjfYQdi1n4+v6b8rHEonpKs= +go.opentelemetry.io/collector/processor v0.98.0 h1:onrg8a99lToytbHF148Bg9a7DfNk31B+p6UHouiiVTw= +go.opentelemetry.io/collector/processor v0.98.0/go.mod h1:QxgzjmJI12DQWN0LIHmZBOR7HRzPuVWFW4oqTdrS1ho= +go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 h1:iM4fMLGig3GKmz5XNtOPKDsnCnvbi0+UHYaWsx/aSRc= +go.opentelemetry.io/collector/processor/batchprocessor v0.98.0/go.mod h1:ROnuUkZJgpKEIDf3AIVjgRGNI7KPqCKPXsw8whL6Hzs= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0 h1:1jjiC3POfIluGLVM+6y8nolKEI95/vlHAvDmIOatags= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0/go.mod h1:VQDDrhQbIoelGF+fKzy6vCQM3hWDCH2YFaZKqgTDmGk= +go.opentelemetry.io/collector/receiver v0.98.0 h1:qw6JYwm+sHcZvM1DByo3QlGe6yGHuwd0yW4hEPVqYKU= +go.opentelemetry.io/collector/receiver v0.98.0/go.mod h1:AwIWn+KnquTR+kbhXQrMH+i2PvTCFldSIJznBWFYs0s= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 h1:j7lfLwc5o1dtXIPXU8LjmxadejmJVRHN57ZYGH33Wq4= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0/go.mod h1:uWDBDxaWuzF1U5S2UIhstO0+Q8aUiwiUu8uO1IYN2XQ= +go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= +go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/collector/service v0.98.0 h1:lLJ7VXPXcV62fSISh4GuNd5ti6WvKje76NSgezc3ydo= 
+go.opentelemetry.io/collector/service v0.98.0/go.mod h1:wB7ozvZTHtMefb5KTYy5nyrVYWpGk8teq8jWFs4blIU= +go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs= +go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfFmiWAUF9GtJdcLz+BU= +go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA= +go.opentelemetry.io/contrib/zpages v0.50.0 h1:hKC5asr83xDN4ErwSHVdk3gv053pZiF8SZKmS86IPEw= +go.opentelemetry.io/contrib/zpages v0.50.0/go.mod h1:8WovRn95fZdaX/dr3e4h7D8IqiVsnZ+WxY0Yn4LyU3k= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/bridge/opencensus v1.25.0 h1:0o/9KwAgxjK+3pMV0pwIF5toYHqDsPmQhfrBvKaG6mU= +go.opentelemetry.io/otel/bridge/opencensus v1.25.0/go.mod h1:rZyTdpmRqoV+PpUn6QlruxJp/kE4765rPy0pP6mRDk8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 h1:hDKnobznDpcdTlNzO0S/owRB8tyVr1OoeZZhDoqY+Cs= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0/go.mod h1:kUDQaUs1h8iTIHbQTk+iJRiUvSfJYMMKTtMCaiVu7B0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 h1:Wc4hZuYXhVqq+TfRXLXlmNIL/awOanGx8ssq3ciDQxc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0/go.mod h1:BydOvapRqVEc0DVz27qWBX2jq45Ca5TI9mhZBDIdweY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/exporters/prometheus v0.47.0 h1:OL6yk1Z/pEGdDnrBbxSsH+t4FY1zXfBRGd7bjwhlMLU= +go.opentelemetry.io/otel/exporters/prometheus v0.47.0/go.mod h1:xF3N4OSICZDVbbYZydz9MHFro1RjmkPUKEvar2utG+Q= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 h1:d7nHbdzU84STOiszaOxQ3kw5IwkSmHsU5Muol5/vL4I= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0/go.mod h1:yiPA1iZbb/EHYnODXOxvtKuB0I2hV8ehfLTEWpl7BJU= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 h1:0W5o9SzoR15ocYHEQfvfipzcNog1lBxOLfnex91Hk6s= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0/go.mod h1:zVZ8nz+VSggWmnh6tTsJqXQ7rU4xLwRtna1M4x5jq58= +go.opentelemetry.io/otel/metric v1.26.0 
h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/sdk/metric v1.25.0 h1:7CiHOy08LbrxMAp4vWpbiPcklunUshVpAvGBrdDRlGw= +go.opentelemetry.io/otel/sdk/metric v1.25.0/go.mod h1:LzwoKptdbBBdYfvtGCzGwk6GWMA3aUzBOwtQpR6Nz7o= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -593,8 +602,8 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -604,16 +613,16 @@ golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc 
h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -645,11 +654,11 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -657,8 +666,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -691,11 +700,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -727,8 +737,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= -gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -740,12 +750,12 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -753,8 +763,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -764,10 +774,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/grpctest/reflection.go b/internal/grpctest/reflection.go index 54a5138d3dc..8aba438531d 100644 --- a/internal/grpctest/reflection.go +++ b/internal/grpctest/reflection.go @@ -34,7 +34,7 @@ type ReflectionServiceValidator struct { // Execute performs validation. 
func (v ReflectionServiceValidator) Execute(t *testing.T) { - conn, err := grpc.Dial( + conn, err := grpc.NewClient( v.HostPort, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) diff --git a/internal/metricstest/local.go b/internal/metricstest/local.go index d725661bad3..7036b9ae3a7 100644 --- a/internal/metricstest/local.go +++ b/internal/metricstest/local.go @@ -392,3 +392,7 @@ func (l *Factory) Namespace(scope metrics.NSOptions) metrics.Factory { Backend: l.Backend, } } + +func (l *Factory) Stop() { + l.Backend.Stop() +} diff --git a/jaeger-ui b/jaeger-ui index f5cd649b095..f8a4ece2c01 160000 --- a/jaeger-ui +++ b/jaeger-ui @@ -1 +1 @@ -Subproject commit f5cd649b0954704a8ced8b13b71da02a31952260 +Subproject commit f8a4ece2c01769ac1126019fdca489f73ff4a1e8 diff --git a/model/adjuster/span_id_deduper.go b/model/adjuster/span_id_deduper.go index 7f42e3563e2..03f9b987cd0 100644 --- a/model/adjuster/span_id_deduper.go +++ b/model/adjuster/span_id_deduper.go @@ -17,6 +17,7 @@ package adjuster import ( "errors" + "math" "github.com/jaegertracing/jaeger/model" ) @@ -42,7 +43,7 @@ const ( warningTooManySpans = "cannot assign unique span ID, too many spans in the trace" ) -var maxSpanID = model.NewSpanID(0xffffffffffffffff) +var maxSpanID = model.NewSpanID(math.MaxUint64) type spanIDDeduper struct { trace *model.Trace diff --git a/pkg/cassandra/config/config.go b/pkg/cassandra/config/config.go index e960af81490..a55ed402756 100644 --- a/pkg/cassandra/config/config.go +++ b/pkg/cassandra/config/config.go @@ -19,6 +19,7 @@ import ( "fmt" "time" + "github.com/asaskevich/govalidator" "github.com/gocql/gocql" "go.uber.org/zap" @@ -29,21 +30,21 @@ import ( // Configuration describes the configuration properties needed to connect to a Cassandra cluster type Configuration struct { - Servers []string `validate:"nonzero" mapstructure:"servers"` - Keyspace string `validate:"nonzero" mapstructure:"keyspace"` - LocalDC string `yaml:"local_dc" mapstructure:"local_dc"` - ConnectionsPerHost int `validate:"min=1" yaml:"connections_per_host" mapstructure:"connections_per_host"` - Timeout time.Duration `validate:"min=500" mapstructure:"-"` - ConnectTimeout time.Duration `yaml:"connect_timeout" mapstructure:"connection_timeout"` - ReconnectInterval time.Duration `validate:"min=500" yaml:"reconnect_interval" mapstructure:"reconnect_interval"` - SocketKeepAlive time.Duration `validate:"min=0" yaml:"socket_keep_alive" mapstructure:"socket_keep_alive"` - MaxRetryAttempts int `validate:"min=0" yaml:"max_retry_attempt" mapstructure:"max_retry_attempts"` - ProtoVersion int `yaml:"proto_version" mapstructure:"proto_version"` - Consistency string `yaml:"consistency" mapstructure:"consistency"` - DisableCompression bool `yaml:"disable-compression" mapstructure:"disable_compression"` - Port int `yaml:"port" mapstructure:"port"` - Authenticator Authenticator `yaml:"authenticator" mapstructure:",squash"` - DisableAutoDiscovery bool `yaml:"disable_auto_discovery" mapstructure:"-"` + Servers []string `valid:"required,url" mapstructure:"servers"` + Keyspace string `valid:"nonzero" mapstructure:"keyspace"` + LocalDC string `mapstructure:"local_dc"` + ConnectionsPerHost int `valid:"min=1" mapstructure:"connections_per_host"` + Timeout time.Duration `valid:"min=500" mapstructure:"-"` + ConnectTimeout time.Duration `mapstructure:"connection_timeout"` + ReconnectInterval time.Duration `valid:"min=500" mapstructure:"reconnect_interval"` + SocketKeepAlive time.Duration `valid:"min=0" 
mapstructure:"socket_keep_alive"` + MaxRetryAttempts int `valid:"min=0" mapstructure:"max_retry_attempts"` + ProtoVersion int `mapstructure:"proto_version"` + Consistency string `mapstructure:"consistency"` + DisableCompression bool `mapstructure:"disable_compression"` + Port int `mapstructure:"port"` + Authenticator Authenticator `mapstructure:",squash"` + DisableAutoDiscovery bool `mapstructure:"-"` TLS tlscfg.Options `mapstructure:"tls"` } @@ -170,3 +171,8 @@ func (c *Configuration) Close() error { func (c *Configuration) String() string { return fmt.Sprintf("%+v", *c) } + +func (c *Configuration) Validate() error { + _, err := govalidator.ValidateStruct(c) + return err +} diff --git a/pkg/cassandra/gocql/testutils/udt_test.go b/pkg/cassandra/gocql/testutils/udt_test.go index 5934a4699ae..fcb2bac146e 100644 --- a/pkg/cassandra/gocql/testutils/udt_test.go +++ b/pkg/cassandra/gocql/testutils/udt_test.go @@ -8,7 +8,8 @@ import ( "github.com/gocql/gocql" - "github.com/jaegertracing/jaeger/pkg/cassandra/gocql/testutils" + gocqlutils "github.com/jaegertracing/jaeger/pkg/cassandra/gocql/testutils" + "github.com/jaegertracing/jaeger/pkg/testutils" ) // CustomUDT is a custom type that implements gocql.UDTMarshaler and gocql.UDTUnmarshaler interfaces. @@ -48,7 +49,7 @@ func TestUDTTestCase(t *testing.T) { } // Define UDT fields for testing - udtFields := []testutils.UDTField{ + udtFields := []gocqlutils.UDTField{ { Name: "Field1", Type: gocql.TypeBigInt, @@ -70,7 +71,7 @@ func TestUDTTestCase(t *testing.T) { } // Create a UDTTestCase - testCase := testutils.UDTTestCase{ + testCase := gocqlutils.UDTTestCase{ Obj: udtInstance, ObjName: "CustomUDT", New: func() gocql.UDTUnmarshaler { return &CustomUDT{} }, @@ -79,3 +80,7 @@ func TestUDTTestCase(t *testing.T) { testCase.Run(t) } + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/pkg/clientcfg/clientcfghttp/cfgmgr_test.go b/pkg/clientcfg/clientcfghttp/cfgmgr_test.go index f6da22f7909..7c24bbeda24 100644 --- a/pkg/clientcfg/clientcfghttp/cfgmgr_test.go +++ b/pkg/clientcfg/clientcfghttp/cfgmgr_test.go @@ -37,6 +37,10 @@ func (m *mockSamplingStore) GetSamplingStrategy(_ context.Context, serviceName s return m.samplingResponse, nil } +func (m *mockSamplingStore) Close() error { + return nil +} + type mockBaggageMgr struct { baggageResponse []*baggage.BaggageRestriction } diff --git a/pkg/config/tlscfg/options.go b/pkg/config/tlscfg/options.go index aab23cd8db9..2707887831d 100644 --- a/pkg/config/tlscfg/options.go +++ b/pkg/config/tlscfg/options.go @@ -78,11 +78,10 @@ func (p *Options) Config(logger *zap.Logger) (*tls.Config, error) { } } - // #nosec G402 tlsCfg := &tls.Config{ RootCAs: certPool, ServerName: p.ServerName, - InsecureSkipVerify: p.SkipHostVerify, + InsecureSkipVerify: p.SkipHostVerify, /* #nosec G402*/ CipherSuites: cipherSuiteIds, MinVersion: minVersionId, MaxVersion: maxVersionId, diff --git a/pkg/discovery/grpcresolver/grpc_resolver_test.go b/pkg/discovery/grpcresolver/grpc_resolver_test.go index c0c64aa3de3..111798548fd 100644 --- a/pkg/discovery/grpcresolver/grpc_resolver_test.go +++ b/pkg/discovery/grpcresolver/grpc_resolver_test.go @@ -148,7 +148,7 @@ func TestGRPCResolverRoundRobin(t *testing.T) { t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) { res := New(notifier, discoverer, zap.NewNop(), test.minPeers) - cc, err := grpc.Dial(res.Scheme()+":///round_robin", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(GRPCServiceConfig)) + cc, err := 
grpc.NewClient(res.Scheme()+":///round_robin", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(GRPCServiceConfig)) require.NoError(t, err, "could not dial using resolver's scheme") defer cc.Close() diff --git a/pkg/es/config/config.go b/pkg/es/config/config.go index 1549156cff6..3e0f44673ab 100644 --- a/pkg/es/config/config.go +++ b/pkg/es/config/config.go @@ -29,6 +29,7 @@ import ( "sync" "time" + "github.com/asaskevich/govalidator" esV8 "github.com/elastic/go-elasticsearch/v8" "github.com/olivere/elastic" "go.uber.org/zap" @@ -45,7 +46,7 @@ import ( // Configuration describes the configuration properties needed to connect to an ElasticSearch cluster type Configuration struct { - Servers []string `mapstructure:"server_urls"` + Servers []string `mapstructure:"server_urls" valid:"required,url"` RemoteReadClusters []string `mapstructure:"remote_read_clusters"` Username string `mapstructure:"username"` Password string `mapstructure:"password" json:"-"` @@ -54,14 +55,14 @@ type Configuration struct { AllowTokenFromContext bool `mapstructure:"-"` Sniffer bool `mapstructure:"sniffer"` // https://github.com/olivere/elastic/wiki/Sniffing SnifferTLSEnabled bool `mapstructure:"sniffer_tls_enabled"` - MaxDocCount int `mapstructure:"-"` // Defines maximum number of results to fetch from storage per query - MaxSpanAge time.Duration `yaml:"max_span_age" mapstructure:"-"` // configures the maximum lookback on span reads - NumShards int64 `yaml:"shards" mapstructure:"num_shards"` - NumReplicas int64 `yaml:"replicas" mapstructure:"num_replicas"` - PrioritySpanTemplate int64 `yaml:"priority_span_template" mapstructure:"priority_span_template"` - PriorityServiceTemplate int64 `yaml:"priority_service_template" mapstructure:"priority_service_template"` - PriorityDependenciesTemplate int64 `yaml:"priority_dependencies_template" mapstructure:"priority_dependencies_template"` - Timeout time.Duration `validate:"min=500" mapstructure:"-"` + MaxDocCount int `mapstructure:"-"` // Defines maximum number of results to fetch from storage per query + MaxSpanAge time.Duration `mapstructure:"-"` // configures the maximum lookback on span reads + NumShards int64 `mapstructure:"num_shards"` + NumReplicas int64 `mapstructure:"num_replicas"` + PrioritySpanTemplate int64 `mapstructure:"priority_span_template"` + PriorityServiceTemplate int64 `mapstructure:"priority_service_template"` + PriorityDependenciesTemplate int64 `mapstructure:"priority_dependencies_template"` + Timeout time.Duration `mapstructure:"-"` BulkSize int `mapstructure:"-"` BulkWorkers int `mapstructure:"-"` BulkActions int `mapstructure:"-"` @@ -69,9 +70,12 @@ type Configuration struct { IndexPrefix string `mapstructure:"index_prefix"` IndexDateLayoutSpans string `mapstructure:"-"` IndexDateLayoutServices string `mapstructure:"-"` + IndexDateLayoutSampling string `mapstructure:"-"` IndexDateLayoutDependencies string `mapstructure:"-"` IndexRolloverFrequencySpans string `mapstructure:"-"` IndexRolloverFrequencyServices string `mapstructure:"-"` + IndexRolloverFrequencySampling string `mapstructure:"-"` + AdaptiveSamplingLookback time.Duration `mapstructure:"-"` Tags TagsAsFields `mapstructure:"tags_as_fields"` Enabled bool `mapstructure:"-"` TLS tlscfg.Options `mapstructure:"tls"` @@ -230,6 +234,9 @@ func (c *Configuration) ApplyDefaults(source *Configuration) { if c.MaxSpanAge == 0 { c.MaxSpanAge = source.MaxSpanAge } + if c.AdaptiveSamplingLookback == 0 { + c.AdaptiveSamplingLookback = source.AdaptiveSamplingLookback + 
} if c.NumShards == 0 { c.NumShards = source.NumShards } @@ -285,15 +292,22 @@ func (c *Configuration) ApplyDefaults(source *Configuration) { // GetIndexRolloverFrequencySpansDuration returns jaeger-span index rollover frequency duration func (c *Configuration) GetIndexRolloverFrequencySpansDuration() time.Duration { - if c.IndexRolloverFrequencySpans == "hour" { - return -1 * time.Hour - } - return -24 * time.Hour + return getIndexRolloverFrequencyDuration(c.IndexRolloverFrequencySpans) } // GetIndexRolloverFrequencyServicesDuration returns jaeger-service index rollover frequency duration func (c *Configuration) GetIndexRolloverFrequencyServicesDuration() time.Duration { - if c.IndexRolloverFrequencyServices == "hour" { + return getIndexRolloverFrequencyDuration(c.IndexRolloverFrequencyServices) +} + +// GetIndexRolloverFrequencySamplingDuration returns jaeger-sampling index rollover frequency duration +func (c *Configuration) GetIndexRolloverFrequencySamplingDuration() time.Duration { + return getIndexRolloverFrequencyDuration(c.IndexRolloverFrequencySampling) +} + +// GetIndexRolloverFrequencyDuration returns the index rollover frequency duration for the given frequency string +func getIndexRolloverFrequencyDuration(frequency string) time.Duration { + if frequency == "hour" { return -1 * time.Hour } return -24 * time.Hour @@ -467,3 +481,8 @@ func loadTokenFromFile(path string) (string, error) { } return strings.TrimRight(string(b), "\r\n"), nil } + +func (c *Configuration) Validate() error { + _, err := govalidator.ValidateStruct(c) + return err +} diff --git a/pkg/kafka/consumer/config.go b/pkg/kafka/consumer/config.go index 8c849b6b228..402ffeedd56 100644 --- a/pkg/kafka/consumer/config.go +++ b/pkg/kafka/consumer/config.go @@ -42,13 +42,14 @@ type Configuration struct { auth.AuthenticationConfig `mapstructure:"authentication"` Consumer - Brokers []string `mapstructure:"brokers"` - Topic string `mapstructure:"topic"` - InitialOffset int64 - GroupID string `mapstructure:"group_id"` - ClientID string `mapstructure:"client_id"` - ProtocolVersion string `mapstructure:"protocol_version"` - RackID string `mapstructure:"rack_id"` + Brokers []string `mapstructure:"brokers"` + Topic string `mapstructure:"topic"` + InitialOffset int64 + GroupID string `mapstructure:"group_id"` + ClientID string `mapstructure:"client_id"` + ProtocolVersion string `mapstructure:"protocol_version"` + RackID string `mapstructure:"rack_id"` + FetchMaxMessageBytes int32 `mapstructure:"fetch_max_message_bytes"` } // NewConsumer creates a new kafka consumer @@ -57,6 +58,7 @@ func (c *Configuration) NewConsumer(logger *zap.Logger) (Consumer, error) { saramaConfig.Group.Mode = cluster.ConsumerModePartitions saramaConfig.ClientID = c.ClientID saramaConfig.RackID = c.RackID + saramaConfig.Consumer.Fetch.Default = c.FetchMaxMessageBytes if len(c.ProtocolVersion) > 0 { ver, err := sarama.ParseKafkaVersion(c.ProtocolVersion) if err != nil { diff --git a/pkg/kafka/producer/config.go b/pkg/kafka/producer/config.go index f12b09dc2f8..6e9cfa53cd1 100644 --- a/pkg/kafka/producer/config.go +++ b/pkg/kafka/producer/config.go @@ -39,6 +39,7 @@ type Configuration struct { BatchSize int `mapstructure:"batch_size"` BatchMinMessages int `mapstructure:"batch_min_messages"` BatchMaxMessages int `mapstructure:"batch_max_messages"` + MaxMessageBytes int `mapstructure:"max_message_bytes"` auth.AuthenticationConfig `mapstructure:"authentication"` } @@ -53,6 +54,7 @@ func (c *Configuration) NewProducer(logger *zap.Logger) (sarama.AsyncProducer, 
e saramaConfig.Producer.Flush.Frequency = c.BatchLinger saramaConfig.Producer.Flush.Messages = c.BatchMinMessages saramaConfig.Producer.Flush.MaxMessages = c.BatchMaxMessages + saramaConfig.Producer.MaxMessageBytes = c.MaxMessageBytes if len(c.ProtocolVersion) > 0 { ver, err := sarama.ParseKafkaVersion(c.ProtocolVersion) if err != nil { diff --git a/pkg/testutils/leakcheck_test.go b/pkg/testutils/leakcheck_test.go index b13858c8c30..fb164729dbf 100644 --- a/pkg/testutils/leakcheck_test.go +++ b/pkg/testutils/leakcheck_test.go @@ -12,16 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutils +package testutils_test import ( "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" ) func TestVerifyGoLeaksOnce(t *testing.T) { - defer VerifyGoLeaksOnce(t) + defer testutils.VerifyGoLeaksOnce(t) } func TestMain(m *testing.M) { - VerifyGoLeaks(m) + testutils.VerifyGoLeaks(m) } diff --git a/plugin/metrics/prometheus/metricsstore/reader_test.go b/plugin/metrics/prometheus/metricsstore/reader_test.go index 2c7066cb200..c5199a684ac 100644 --- a/plugin/metrics/prometheus/metricsstore/reader_test.go +++ b/plugin/metrics/prometheus/metricsstore/reader_test.go @@ -38,6 +38,7 @@ import ( "github.com/jaegertracing/jaeger/pkg/bearertoken" "github.com/jaegertracing/jaeger/pkg/config/tlscfg" "github.com/jaegertracing/jaeger/pkg/prometheus/config" + "github.com/jaegertracing/jaeger/pkg/testutils" "github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics" "github.com/jaegertracing/jaeger/storage/metricsstore" ) @@ -753,13 +754,15 @@ func TestGetRoundTripperTLSConfig(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { logger := zap.NewNop() - rt, err := getHTTPRoundTripper(&config.Configuration{ + config := &config.Configuration{ ConnectTimeout: 9 * time.Millisecond, TLS: tlscfg.Options{ Enabled: tc.tlsEnabled, }, TokenOverrideFromContext: true, - }, logger) + } + defer config.TLS.Close() + rt, err := getHTTPRoundTripper(config, logger) require.NoError(t, err) server := newFakePromServer(t) @@ -965,3 +968,7 @@ func assertMetrics(t *testing.T, gotMetrics *metrics.MetricFamily, wantLabels ma actualVal := mps[0].Value.(*metrics.MetricPoint_GaugeValue).GaugeValue.Value.(*metrics.GaugeValue_DoubleValue).DoubleValue assert.Equal(t, float64(9223372036854), actualVal) } + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/plugin/sampling/strategystore/adaptive/factory_test.go b/plugin/sampling/strategystore/adaptive/factory_test.go index 71a7bf0bb60..99c02b8c1f2 100644 --- a/plugin/sampling/strategystore/adaptive/factory_test.go +++ b/plugin/sampling/strategystore/adaptive/factory_test.go @@ -72,8 +72,10 @@ func TestFactory(t *testing.T) { assert.Equal(t, time.Second*2, f.options.FollowerLeaseRefreshInterval) require.NoError(t, f.Initialize(metrics.NullFactory, &mockSamplingStoreFactory{}, zap.NewNop())) - _, _, err := f.CreateStrategyStore() + store, aggregator, err := f.CreateStrategyStore() require.NoError(t, err) + require.NoError(t, store.Close()) + require.NoError(t, aggregator.Close()) } func TestBadConfigFail(t *testing.T) { diff --git a/plugin/sampling/strategystore/adaptive/options.go b/plugin/sampling/strategystore/adaptive/options.go index f1bc2eddc6a..64288632dbf 100644 --- a/plugin/sampling/strategystore/adaptive/options.go +++ b/plugin/sampling/strategystore/adaptive/options.go @@ -116,7 +116,7 @@ type Options struct { // AddFlags adds flags for Options func AddFlags(flagSet *flag.FlagSet) { 
flagSet.Float64(targetSamplesPerSecond, defaultTargetSamplesPerSecond, - "The the global target rate of samples per operation.", + "The global target rate of samples per operation.", ) flagSet.Float64(deltaTolerance, defaultDeltaTolerance, "The acceptable amount of deviation between the observed samples-per-second and the desired (target) samples-per-second, expressed as a ratio.", diff --git a/plugin/sampling/strategystore/adaptive/package_test.go b/plugin/sampling/strategystore/adaptive/package_test.go new file mode 100644 index 00000000000..64db46d18bc --- /dev/null +++ b/plugin/sampling/strategystore/adaptive/package_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package adaptive + +import ( + "testing" + + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/plugin/sampling/strategystore/adaptive/processor.go b/plugin/sampling/strategystore/adaptive/processor.go index a87e98f8fc9..7ac5c1834fa 100644 --- a/plugin/sampling/strategystore/adaptive/processor.go +++ b/plugin/sampling/strategystore/adaptive/processor.go @@ -110,7 +110,8 @@ type Processor struct { serviceCache []SamplingCache - shutdown chan struct{} + shutdown chan struct{} + bgFinished sync.WaitGroup operationsCalculatedGauge metrics.Gauge calculateProbabilitiesLatency metrics.Timer @@ -170,19 +171,28 @@ func (p *Processor) Start() error { p.shutdown = make(chan struct{}) p.loadProbabilities() p.generateStrategyResponses() - go p.runCalculationLoop() - go p.runUpdateProbabilitiesLoop() + p.runBackground(p.runCalculationLoop) + p.runBackground(p.runUpdateProbabilitiesLoop) return nil } +func (p *Processor) runBackground(f func()) { + p.bgFinished.Add(1) + go func() { + f() + p.bgFinished.Done() + }() +} + // Close stops the processor from calculating probabilities. func (p *Processor) Close() error { p.logger.Info("stopping adaptive sampling processor") - if err := p.electionParticipant.Close(); err != nil { - return err + err := p.electionParticipant.Close() + if p.shutdown != nil { + close(p.shutdown) } - close(p.shutdown) - return nil + p.bgFinished.Wait() + return err } func (p *Processor) loadProbabilities() { @@ -200,7 +210,12 @@ func (p *Processor) loadProbabilities() { // runUpdateProbabilitiesLoop is a loop that reads probabilities from storage. // The follower updates its local cache with the latest probabilities and serves them. func (p *Processor) runUpdateProbabilitiesLoop() { - addJitter(p.followerRefreshInterval) + select { + case <-time.After(addJitter(p.followerRefreshInterval)): + case <-p.shutdown: + return + } + ticker := time.NewTicker(p.followerRefreshInterval) defer ticker.Stop() for { @@ -221,13 +236,12 @@ func (p *Processor) isLeader() bool { return p.electionParticipant.IsLeader() } -// addJitter sleeps for a random amount of time. Without jitter, if the host holding the leader +// addJitter adds a random amount of time. Without jitter, if the host holding the leader // lock were to die, then all other collectors can potentially wait for a full cycle before // trying to acquire the lock. With jitter, we can reduce the average amount of time before a // new leader is elected. Furthermore, jitter can be used to spread out read load on storage. 
-func addJitter(jitterAmount time.Duration) { - delay := (jitterAmount / 2) + time.Duration(rand.Int63n(int64(jitterAmount/2))) - time.Sleep(delay) +func addJitter(jitterAmount time.Duration) time.Duration { + return (jitterAmount / 2) + time.Duration(rand.Int63n(int64(jitterAmount/2))) } func (p *Processor) runCalculationLoop() { @@ -272,7 +286,7 @@ func (p *Processor) runCalculationLoop() { // be way longer than the time to run the calculations. p.generateStrategyResponses() p.calculateProbabilitiesLatency.Record(time.Since(startTime)) - go p.saveProbabilitiesAndQPS() + p.runBackground(p.saveProbabilitiesAndQPS) } case <-p.shutdown: return diff --git a/plugin/sampling/strategystore/adaptive/processor_test.go b/plugin/sampling/strategystore/adaptive/processor_test.go index c63ea246a71..03ae6b7b7bb 100644 --- a/plugin/sampling/strategystore/adaptive/processor_test.go +++ b/plugin/sampling/strategystore/adaptive/processor_test.go @@ -449,6 +449,7 @@ func TestRunUpdateProbabilitiesLoop(t *testing.T) { p.RLock() assert.NotNil(t, p.probabilities) assert.NotNil(t, p.strategyResponses) + p.RUnlock() } func TestRealisticRunCalculationLoop(t *testing.T) { @@ -880,6 +881,7 @@ func TestErrors(t *testing.T) { p, err := newProcessor(cfg, "host", mockStorage, mockEP, metrics.NullFactory, zap.NewNop()) require.NoError(t, err) require.Error(t, p.Start()) + require.Error(t, p.Close()) // close errors mockEP = &epmocks.ElectionParticipant{} diff --git a/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceA.json b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceA.json new file mode 100644 index 00000000000..6834df079eb --- /dev/null +++ b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceA.json @@ -0,0 +1,16 @@ +{ + "probabilisticSampling": { + "samplingRate": 1 + }, + "operationSampling": { + "defaultSamplingProbability": 1, + "perOperationStrategies": [ + { + "operation": "/health", + "probabilisticSampling": { + "samplingRate": 0.1 + } + } + ] + } +} \ No newline at end of file diff --git a/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceB.json b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceB.json new file mode 100644 index 00000000000..56e51c78391 --- /dev/null +++ b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceB.json @@ -0,0 +1,6 @@ +{ + "strategyType": 1, + "rateLimitingSampling": { + "maxTracesPerSecond": 3 + } +} \ No newline at end of file diff --git a/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceA.json b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceA.json new file mode 100644 index 00000000000..6834df079eb --- /dev/null +++ b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceA.json @@ -0,0 +1,16 @@ +{ + "probabilisticSampling": { + "samplingRate": 1 + }, + "operationSampling": { + "defaultSamplingProbability": 1, + "perOperationStrategies": [ + { + "operation": "/health", + "probabilisticSampling": { + "samplingRate": 0.1 + } + } + ] + } +} \ No newline at end of file diff --git a/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceB.json 
b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceB.json new file mode 100644 index 00000000000..cc28f904fef --- /dev/null +++ b/plugin/sampling/strategystore/static/fixtures/TestServiceNoPerOperationStrategies_ServiceB.json @@ -0,0 +1,17 @@ +{ + "strategyType": 1, + "rateLimitingSampling": { + "maxTracesPerSecond": 3 + }, + "operationSampling": { + "defaultSamplingProbability": 0.2, + "perOperationStrategies": [ + { + "operation": "/health", + "probabilisticSampling": { + "samplingRate": 0.1 + } + } + ] + } +} \ No newline at end of file diff --git a/plugin/sampling/strategystore/static/fixtures/service_no_per_operation.json b/plugin/sampling/strategystore/static/fixtures/service_no_per_operation.json index 979c5639027..29b50d9f4d3 100644 --- a/plugin/sampling/strategystore/static/fixtures/service_no_per_operation.json +++ b/plugin/sampling/strategystore/static/fixtures/service_no_per_operation.json @@ -1,25 +1,25 @@ - { - "service_strategies": [ - { - "service": "ServiceA", - "type": "probabilistic", - "param": 1.0 - }, - { - "service": "ServiceB", - "type": "ratelimiting", - "param": 3 - } - ], - "default_strategy": { - "type": "probabilistic", - "param": 0.2, - "operation_strategies": [ - { - "operation": "/health", - "type": "probabilistic", - "param": 0.0 - } - ] - } - } +{ + "service_strategies": [ + { + "service": "ServiceA", + "type": "probabilistic", + "param": 1.0 + }, + { + "service": "ServiceB", + "type": "ratelimiting", + "param": 3 + } + ], + "default_strategy": { + "type": "probabilistic", + "param": 0.2, + "operation_strategies": [ + { + "operation": "/health", + "type": "probabilistic", + "param": 0.1 + } + ] + } +} diff --git a/plugin/sampling/strategystore/static/options.go b/plugin/sampling/strategystore/static/options.go index f77ac0002b8..73c8bb801b7 100644 --- a/plugin/sampling/strategystore/static/options.go +++ b/plugin/sampling/strategystore/static/options.go @@ -25,6 +25,7 @@ const ( // samplingStrategiesFile contains the name of CLI option for config file. samplingStrategiesFile = "sampling.strategies-file" samplingStrategiesReloadInterval = "sampling.strategies-reload-interval" + samplingStrategiesBugfix5270 = "sampling.strategies.bugfix-5270" ) // Options holds configuration for the static sampling strategy store. @@ -33,17 +34,23 @@ type Options struct { StrategiesFile string // ReloadInterval is the time interval to check and reload sampling strategies file ReloadInterval time.Duration + // Flag for enabling possibly breaking change which includes default operations level + // strategies when calculating Ratelimiting type service level strategy + // more information https://github.com/jaegertracing/jaeger/issues/5270 + IncludeDefaultOpStrategies bool } // AddFlags adds flags for Options func AddFlags(flagSet *flag.FlagSet) { flagSet.Duration(samplingStrategiesReloadInterval, 0, "Reload interval to check and reload sampling strategies file. Zero value means no reloading") flagSet.String(samplingStrategiesFile, "", "The path for the sampling strategies file in JSON format. See sampling documentation to see format of the file") + flagSet.Bool(samplingStrategiesBugfix5270, false, "Include default operation level strategies for Ratesampling type service level strategy. Cf. 
https://github.com/jaegertracing/jaeger/issues/5270") } // InitFromViper initializes Options with properties from viper func (opts *Options) InitFromViper(v *viper.Viper) *Options { opts.StrategiesFile = v.GetString(samplingStrategiesFile) opts.ReloadInterval = v.GetDuration(samplingStrategiesReloadInterval) + opts.IncludeDefaultOpStrategies = v.GetBool(samplingStrategiesBugfix5270) return opts } diff --git a/plugin/sampling/strategystore/static/strategy_store.go b/plugin/sampling/strategystore/static/strategy_store.go index cf8544630e9..fdf9959a9d8 100644 --- a/plugin/sampling/strategystore/static/strategy_store.go +++ b/plugin/sampling/strategystore/static/strategy_store.go @@ -43,6 +43,8 @@ type strategyStore struct { storedStrategies atomic.Value // holds *storedStrategies cancelFunc context.CancelFunc + + options Options } type storedStrategies struct { @@ -58,11 +60,12 @@ func NewStrategyStore(options Options, logger *zap.Logger) (ss.StrategyStore, er h := &strategyStore{ logger: logger, cancelFunc: cancelFunc, + options: options, } h.storedStrategies.Store(defaultStrategies()) if options.StrategiesFile == "" { - h.parseStrategies(nil) + h.logger.Info("No sampling strategies source provided, using defaults") return h, nil } @@ -70,8 +73,19 @@ func NewStrategyStore(options Options, logger *zap.Logger) (ss.StrategyStore, er strategies, err := loadStrategies(loadFn) if err != nil { return nil, err + } else if strategies == nil { + h.logger.Info("No sampling strategies found or URL is unavailable, using defaults") + return h, nil + } + + if !h.options.IncludeDefaultOpStrategies { + h.logger.Warn("Default operations level strategies will not be included for Ratelimiting service strategies." + + "This behavior will be changed in future releases. " + + "Cf. https://github.com/jaegertracing/jaeger/issues/5270") + h.parseStrategies_deprecated(strategies) + } else { + h.parseStrategies(strategies) } - h.parseStrategies(strategies) if options.ReloadInterval > 0 { go h.autoUpdateStrategies(ctx, options.ReloadInterval, loadFn) @@ -91,8 +105,9 @@ func (h *strategyStore) GetSamplingStrategy(_ context.Context, serviceName strin } // Close stops updating the strategies -func (h *strategyStore) Close() { +func (h *strategyStore) Close() error { h.cancelFunc() + return nil } func (h *strategyStore) downloadSamplingStrategies(url string) ([]byte, error) { @@ -205,11 +220,7 @@ func loadStrategies(loadFn strategyLoader) (*strategies, error) { return strategies, nil } -func (h *strategyStore) parseStrategies(strategies *strategies) { - if strategies == nil { - h.logger.Info("No sampling strategies provided or URL is unavailable, using defaults") - return - } +func (h *strategyStore) parseStrategies_deprecated(strategies *strategies) { newStore := defaultStrategies() if strategies.DefaultStrategy != nil { newStore.defaultStrategy = h.parseServiceStrategies(strategies.DefaultStrategy) @@ -248,6 +259,45 @@ func (h *strategyStore) parseStrategies(strategies *strategies) { h.storedStrategies.Store(newStore) } +func (h *strategyStore) parseStrategies(strategies *strategies) { + newStore := defaultStrategies() + if strategies.DefaultStrategy != nil { + newStore.defaultStrategy = h.parseServiceStrategies(strategies.DefaultStrategy) + } + + for _, s := range strategies.ServiceStrategies { + newStore.serviceStrategies[s.Service] = h.parseServiceStrategies(s) + + // Config for this service may not have per-operation strategies, + // but if the default strategy has them they should still apply. 
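+		// For example, with the service_no_per_operation.json fixture in this PR, ServiceB only declares a ratelimiting service-level strategy; +		// with the bugfix enabled its response also carries the default strategy's operation-level sampling (compare the +		// TestServiceNoPerOperationStrategies_ServiceB and TestServiceNoPerOperationStrategiesDeprecatedBehavior_ServiceB snapshots above).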
+ + if newStore.defaultStrategy.OperationSampling == nil { + // Default strategy doesn't have them either, nothing to do. + continue + } + + opS := newStore.serviceStrategies[s.Service].OperationSampling + if opS == nil { + + // Service does not have its own per-operation rules, so copy (by value) from the default strategy. + newOpS := *newStore.defaultStrategy.OperationSampling + + // If the service's own default is probabilistic, then its sampling rate should take precedence. + if newStore.serviceStrategies[s.Service].ProbabilisticSampling != nil { + newOpS.DefaultSamplingProbability = newStore.serviceStrategies[s.Service].ProbabilisticSampling.SamplingRate + } + newStore.serviceStrategies[s.Service].OperationSampling = &newOpS + continue + } + + // If the service did have its own per-operation strategies, then merge them with the default ones. + opS.PerOperationStrategies = mergePerOperationSamplingStrategies( + opS.PerOperationStrategies, + newStore.defaultStrategy.OperationSampling.PerOperationStrategies) + } + h.storedStrategies.Store(newStore) +} + // mergePerOperationSamplingStrategies merges two operation strategies a and b, where a takes precedence over b. func mergePerOperationSamplingStrategies( a, b []*api_v2.OperationSamplingStrategy, diff --git a/plugin/sampling/strategystore/static/strategy_store_test.go b/plugin/sampling/strategystore/static/strategy_store_test.go index 063c28c7b81..f499baa5a7e 100644 --- a/plugin/sampling/strategystore/static/strategy_store_test.go +++ b/plugin/sampling/strategystore/static/strategy_store_test.go @@ -16,10 +16,12 @@ package static import ( "context" + "encoding/json" "fmt" "net/http" "net/http/httptest" "os" + "path/filepath" "strings" "sync/atomic" "testing" @@ -34,26 +36,33 @@ import ( "github.com/jaegertracing/jaeger/proto-gen/api_v2" ) +const snapshotLocation = "./fixtures/" + +// Snapshots can be regenerated via: +// +// REGENERATE_SNAPSHOTS=true go test -v ./plugin/sampling/strategystore/static/strategy_store_test.go +var regenerateSnapshots = os.Getenv("REGENERATE_SNAPSHOTS") == "true" + // strategiesJSON returns the strategy with // a given probability.
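The snapshot constants above and the rewritten tests further down marshal each strategy response and compare it against JSON files stored under ./fixtures, rewriting them when REGENERATE_SNAPSHOTS=true is set. A hedged sketch of that snapshot comparison, pulled out into a reusable helper (the helper name and file are illustrative; the PR inlines this logic in each test):

```go
// Illustrative snapshot helper; the PR keeps this logic inline in its tests.
package static_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var regenerateSnapshots = os.Getenv("REGENERATE_SNAPSHOTS") == "true"

// verifySnapshot marshals got and compares it with fixtures/<TestName>.json.
// Run the tests with REGENERATE_SNAPSHOTS=true to rewrite the stored snapshot instead.
func verifySnapshot(t *testing.T, got any) {
	t.Helper()
	actual, err := json.MarshalIndent(got, "", "  ")
	require.NoError(t, err)

	name := strings.ReplaceAll(t.Name(), "/", "_")
	file := filepath.Join("fixtures", name+".json")

	if regenerateSnapshots {
		require.NoError(t, os.WriteFile(file, actual, 0o644))
		return
	}
	expected, err := os.ReadFile(file)
	require.NoError(t, err)
	assert.Equal(t, string(expected), string(actual),
		"snapshot mismatch; run with REGENERATE_SNAPSHOTS=true to update")
}
```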
func strategiesJSON(probability float32) string { strategy := fmt.Sprintf(` { "default_strategy": { - "type": "probabilistic", - "param": 0.5 - }, - "service_strategies": [ - { - "service": "foo", "type": "probabilistic", - "param": %.1f + "param": 0.5 }, - { - "service": "bar", - "type": "ratelimiting", - "param": 5 - } + "service_strategies": [ + { + "service": "foo", + "type": "probabilistic", + "param": %.1f + }, + { + "service": "bar", + "type": "ratelimiting", + "param": 5 + } ] } `, @@ -107,7 +116,7 @@ func TestStrategyStoreWithFile(t *testing.T) { logger, buf := testutils.NewLogger() store, err := NewStrategyStore(Options{}, logger) require.NoError(t, err) - assert.Contains(t, buf.String(), "No sampling strategies provided or URL is unavailable, using defaults") + assert.Contains(t, buf.String(), "No sampling strategies source provided, using defaults") s, err := store.GetSamplingStrategy(context.Background(), "foo") require.NoError(t, err) assert.EqualValues(t, makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.001), *s) @@ -134,7 +143,7 @@ func TestStrategyStoreWithURL(t *testing.T) { mockServer, _ := mockStrategyServer(t) store, err := NewStrategyStore(Options{StrategiesFile: mockServer.URL + "/service-unavailable"}, logger) require.NoError(t, err) - assert.Contains(t, buf.String(), "No sampling strategies provided or URL is unavailable, using defaults") + assert.Contains(t, buf.String(), "No sampling strategies found or URL is unavailable, using defaults") s, err := store.GetSamplingStrategy(context.Background(), "foo") require.NoError(t, err) assert.EqualValues(t, makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.001), *s) @@ -153,84 +162,96 @@ func TestStrategyStoreWithURL(t *testing.T) { } func TestPerOperationSamplingStrategies(t *testing.T) { - logger, buf := testutils.NewLogger() - store, err := NewStrategyStore(Options{StrategiesFile: "fixtures/operation_strategies.json"}, logger) - assert.Contains(t, buf.String(), "Operation strategies only supports probabilistic sampling at the moment,"+ - "'op2' defaulting to probabilistic sampling with probability 0.8") - assert.Contains(t, buf.String(), "Operation strategies only supports probabilistic sampling at the moment,"+ - "'op4' defaulting to probabilistic sampling with probability 0.001") - require.NoError(t, err) - - expected := makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.8) - - s, err := store.GetSamplingStrategy(context.Background(), "foo") - require.NoError(t, err) - assert.Equal(t, api_v2.SamplingStrategyType_PROBABILISTIC, s.StrategyType) - assert.Equal(t, *expected.ProbabilisticSampling, *s.ProbabilisticSampling) - - require.NotNil(t, s.OperationSampling) - os := s.OperationSampling - assert.EqualValues(t, 0.8, os.DefaultSamplingProbability) - require.Len(t, os.PerOperationStrategies, 4) - - assert.Equal(t, "op6", os.PerOperationStrategies[0].Operation) - assert.EqualValues(t, 0.5, os.PerOperationStrategies[0].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op1", os.PerOperationStrategies[1].Operation) - assert.EqualValues(t, 0.2, os.PerOperationStrategies[1].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op0", os.PerOperationStrategies[2].Operation) - assert.EqualValues(t, 0.2, os.PerOperationStrategies[2].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op7", os.PerOperationStrategies[3].Operation) - assert.EqualValues(t, 1, os.PerOperationStrategies[3].ProbabilisticSampling.SamplingRate) - - expected = makeResponse(api_v2.SamplingStrategyType_RATE_LIMITING, 5) + 
tests := []struct { + options Options + }{ + {Options{StrategiesFile: "fixtures/operation_strategies.json"}}, + {Options{ + StrategiesFile: "fixtures/operation_strategies.json", + IncludeDefaultOpStrategies: true, + }}, + } - s, err = store.GetSamplingStrategy(context.Background(), "bar") - require.NoError(t, err) - assert.Equal(t, api_v2.SamplingStrategyType_RATE_LIMITING, s.StrategyType) - assert.Equal(t, *expected.RateLimitingSampling, *s.RateLimitingSampling) + for _, tc := range tests { + logger, buf := testutils.NewLogger() + store, err := NewStrategyStore(tc.options, logger) + assert.Contains(t, buf.String(), "Operation strategies only supports probabilistic sampling at the moment,"+ + "'op2' defaulting to probabilistic sampling with probability 0.8") + assert.Contains(t, buf.String(), "Operation strategies only supports probabilistic sampling at the moment,"+ + "'op4' defaulting to probabilistic sampling with probability 0.001") + require.NoError(t, err) - require.NotNil(t, s.OperationSampling) - os = s.OperationSampling - assert.EqualValues(t, 0.001, os.DefaultSamplingProbability) - require.Len(t, os.PerOperationStrategies, 5) - assert.Equal(t, "op3", os.PerOperationStrategies[0].Operation) - assert.EqualValues(t, 0.3, os.PerOperationStrategies[0].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op5", os.PerOperationStrategies[1].Operation) - assert.EqualValues(t, 0.4, os.PerOperationStrategies[1].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op0", os.PerOperationStrategies[2].Operation) - assert.EqualValues(t, 0.2, os.PerOperationStrategies[2].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op6", os.PerOperationStrategies[3].Operation) - assert.EqualValues(t, 0, os.PerOperationStrategies[3].ProbabilisticSampling.SamplingRate) - assert.Equal(t, "op7", os.PerOperationStrategies[4].Operation) - assert.EqualValues(t, 1, os.PerOperationStrategies[4].ProbabilisticSampling.SamplingRate) + expected := makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.8) - s, err = store.GetSamplingStrategy(context.Background(), "default") - require.NoError(t, err) - expectedRsp := makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.5) - expectedRsp.OperationSampling = &api_v2.PerOperationSamplingStrategies{ - DefaultSamplingProbability: 0.5, - PerOperationStrategies: []*api_v2.OperationSamplingStrategy{ - { - Operation: "op0", - ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ - SamplingRate: 0.2, + s, err := store.GetSamplingStrategy(context.Background(), "foo") + require.NoError(t, err) + assert.Equal(t, api_v2.SamplingStrategyType_PROBABILISTIC, s.StrategyType) + assert.Equal(t, *expected.ProbabilisticSampling, *s.ProbabilisticSampling) + + require.NotNil(t, s.OperationSampling) + os := s.OperationSampling + assert.EqualValues(t, 0.8, os.DefaultSamplingProbability) + require.Len(t, os.PerOperationStrategies, 4) + + assert.Equal(t, "op6", os.PerOperationStrategies[0].Operation) + assert.EqualValues(t, 0.5, os.PerOperationStrategies[0].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op1", os.PerOperationStrategies[1].Operation) + assert.EqualValues(t, 0.2, os.PerOperationStrategies[1].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op0", os.PerOperationStrategies[2].Operation) + assert.EqualValues(t, 0.2, os.PerOperationStrategies[2].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op7", os.PerOperationStrategies[3].Operation) + assert.EqualValues(t, 1, os.PerOperationStrategies[3].ProbabilisticSampling.SamplingRate) + + expected = 
makeResponse(api_v2.SamplingStrategyType_RATE_LIMITING, 5) + + s, err = store.GetSamplingStrategy(context.Background(), "bar") + require.NoError(t, err) + assert.Equal(t, api_v2.SamplingStrategyType_RATE_LIMITING, s.StrategyType) + assert.Equal(t, *expected.RateLimitingSampling, *s.RateLimitingSampling) + + require.NotNil(t, s.OperationSampling) + os = s.OperationSampling + assert.EqualValues(t, 0.001, os.DefaultSamplingProbability) + require.Len(t, os.PerOperationStrategies, 5) + assert.Equal(t, "op3", os.PerOperationStrategies[0].Operation) + assert.EqualValues(t, 0.3, os.PerOperationStrategies[0].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op5", os.PerOperationStrategies[1].Operation) + assert.EqualValues(t, 0.4, os.PerOperationStrategies[1].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op0", os.PerOperationStrategies[2].Operation) + assert.EqualValues(t, 0.2, os.PerOperationStrategies[2].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op6", os.PerOperationStrategies[3].Operation) + assert.EqualValues(t, 0, os.PerOperationStrategies[3].ProbabilisticSampling.SamplingRate) + assert.Equal(t, "op7", os.PerOperationStrategies[4].Operation) + assert.EqualValues(t, 1, os.PerOperationStrategies[4].ProbabilisticSampling.SamplingRate) + + s, err = store.GetSamplingStrategy(context.Background(), "default") + require.NoError(t, err) + expectedRsp := makeResponse(api_v2.SamplingStrategyType_PROBABILISTIC, 0.5) + expectedRsp.OperationSampling = &api_v2.PerOperationSamplingStrategies{ + DefaultSamplingProbability: 0.5, + PerOperationStrategies: []*api_v2.OperationSamplingStrategy{ + { + Operation: "op0", + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 0.2, + }, }, - }, - { - Operation: "op6", - ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ - SamplingRate: 0, + { + Operation: "op6", + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 0, + }, }, - }, - { - Operation: "op7", - ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ - SamplingRate: 1, + { + Operation: "op7", + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 1, + }, }, }, - }, + } + assert.EqualValues(t, expectedRsp, *s) } - assert.EqualValues(t, expectedRsp, *s) } func TestMissingServiceSamplingStrategyTypes(t *testing.T) { @@ -465,18 +486,66 @@ func TestAutoUpdateStrategyErrors(t *testing.T) { } func TestServiceNoPerOperationStrategies(t *testing.T) { - store, err := NewStrategyStore(Options{StrategiesFile: "fixtures/service_no_per_operation.json"}, zap.NewNop()) + // given setup of strategy store with no specific per operation sampling strategies + // and option "sampling.strategies.bugfix-5270=true" + store, err := NewStrategyStore(Options{ + StrategiesFile: "fixtures/service_no_per_operation.json", + IncludeDefaultOpStrategies: true, + }, zap.NewNop()) require.NoError(t, err) - s, err := store.GetSamplingStrategy(context.Background(), "ServiceA") - require.NoError(t, err) - assert.Equal(t, 1.0, s.OperationSampling.DefaultSamplingProbability) + for _, service := range []string{"ServiceA", "ServiceB"} { + t.Run(service, func(t *testing.T) { + strategy, err := store.GetSamplingStrategy(context.Background(), service) + require.NoError(t, err) + strategyJson, err := json.MarshalIndent(strategy, "", " ") + require.NoError(t, err) - s, err = store.GetSamplingStrategy(context.Background(), "ServiceB") + testName := strings.ReplaceAll(t.Name(), "/", "_") + snapshotFile := filepath.Join(snapshotLocation, 
testName+".json") + expectedServiceResponse, err := os.ReadFile(snapshotFile) + require.NoError(t, err) + + assert.Equal(t, string(expectedServiceResponse), string(strategyJson), + "comparing against stored snapshot. Use REGENERATE_SNAPSHOTS=true to rebuild snapshots.") + + if regenerateSnapshots { + os.WriteFile(snapshotFile, strategyJson, 0o644) + } + }) + } +} + +func TestServiceNoPerOperationStrategiesDeprecatedBehavior(t *testing.T) { + // test case to be removed along with removal of strategy_store.parseStrategies_deprecated, + // see https://github.com/jaegertracing/jaeger/issues/5270 for more details + + // given setup of strategy store with no specific per operation sampling strategies + store, err := NewStrategyStore(Options{ + StrategiesFile: "fixtures/service_no_per_operation.json", + }, zap.NewNop()) require.NoError(t, err) - expected := makeResponse(api_v2.SamplingStrategyType_RATE_LIMITING, 3) - assert.Equal(t, *expected.RateLimitingSampling, *s.RateLimitingSampling) + for _, service := range []string{"ServiceA", "ServiceB"} { + t.Run(service, func(t *testing.T) { + strategy, err := store.GetSamplingStrategy(context.Background(), service) + require.NoError(t, err) + strategyJson, err := json.MarshalIndent(strategy, "", " ") + require.NoError(t, err) + + testName := strings.ReplaceAll(t.Name(), "/", "_") + snapshotFile := filepath.Join(snapshotLocation, testName+".json") + expectedServiceResponse, err := os.ReadFile(snapshotFile) + require.NoError(t, err) + + assert.Equal(t, string(expectedServiceResponse), string(strategyJson), + "comparing against stored snapshot. Use REGENERATE_SNAPSHOTS=true to rebuild snapshots.") + + if regenerateSnapshots { + os.WriteFile(snapshotFile, strategyJson, 0o644) + } + }) + } } func TestSamplingStrategyLoader(t *testing.T) { diff --git a/plugin/storage/badger/docs/storage-file-non-root-permission.md b/plugin/storage/badger/docs/storage-file-non-root-permission.md new file mode 100644 index 00000000000..b28dd70745b --- /dev/null +++ b/plugin/storage/badger/docs/storage-file-non-root-permission.md @@ -0,0 +1,44 @@ +# Badger file permissions as non-root service + +After the release of 1.50, Jaeger's Docker image is no longer running with root privileges (in [#4783](https://github.com/jaegertracing/jaeger/pull/4783)). In some installations it may cause issues such as "permission denied" errors when writing data. + +A possible workaround for this ([proposed here](https://github.com/jaegertracing/jaeger/issues/4906#issuecomment-1991779425)) is to run an initialization step as `root` that pre-creates the Badger data directory and updates its owner to the user that will run the main Jaeger process. + +```yaml +version: "3.9" + +services: +[...] + jaeger: + image: jaegertracing/all-in-one:latest + command: + - "--badger.ephemeral=false" + - "--badger.directory-key=/badger/data/keys" + - "--badger.directory-value=/badger/data/values" + - "--badger.span-store-ttl=72h0m0s" # limit storage to 72hrs + environment: + - SPAN_STORAGE_TYPE=badger + # Mount host directory "jaeger_badger_data" as "/badger" inside the container. + # The actual data directory will be "/badger/data", + # since we cannot change permissions on the mount. + volumes: + - jaeger_badger_data:/badger + ports: + - "16686:16686" + - "14250" + - "4317" + depends_on: + prepare-data-dir: + condition: service_completed_successfully + + prepare-data-dir: + # Run this step as root so that we can change the directory owner. 
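+      # 10001:10001 below matches the non-root UID/GID that the Jaeger images have used since v1.50; adjust it if your image runs as a different user.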
+ user: root + image: jaegertracing/all-in-one:latest + command: "/bin/sh -c 'mkdir -p /badger/data && touch /badger/data/.initialized && chown -R 10001:10001 /badger/data'" + volumes: + - jaeger_badger_data:/badger + +volumes: + jaeger_badger_data: +``` diff --git a/plugin/storage/badger/docs/upgrade-v1-to-v3.md b/plugin/storage/badger/docs/upgrade-v1-to-v3.md new file mode 100644 index 00000000000..fa1e7ad0dec --- /dev/null +++ b/plugin/storage/badger/docs/upgrade-v1-to-v3.md @@ -0,0 +1,110 @@ +# Upgrade Badger v1 to v3 + +In Jaeger 1.24.0, Badger is upgraded from v1.6.2 to v3.2103.0, which changes the underlying data format. The following steps will help you migrate your data: + +1. In Badger v1, the data looks like: + +```sh +❯ ls /tmp/badger/ +data key +❯ ls /tmp/badger/data/ +000001.vlog 000004.vlog 000005.vlog 000008.vlog 000011.vlog 000012.vlog 000013.vlog 000014.vlog 000015.vlog 000016.vlog 000017.vlog +❯ ls /tmp/badger/key/ +000038.sst 000048.sst 000049.sst 000050.sst 000051.sst 000059.sst 000060.sst 000061.sst 000063.sst 000064.sst 000065.sst 000066.sst MANIFEST +``` + +2. Make a backup of your data directory so that you have a copy in case the migration does not complete successfully. + +```sh +❯ cp -r /tmp/badger /tmp/badger.bk +``` + +3. Download, extract, and compile the source code of badger v1: https://github.com/dgraph-io/badger/archive/refs/tags/v1.6.2.tar.gz + +```sh +❯ tar xvzf badger-1.6.2.tar +❯ cd badger-1.6.2/badger/ +❯ go install +``` + +This will install the badger command-line utility into your $GOBIN path, e.g. ~/go/bin/badger. + +4. Use the badger utility to back up the data. + +```sh +❯ ~/go/bin/badger backup --dir /tmp/badger/key --vlog-dir /tmp/badger/data/ +Listening for /debug HTTP requests at port: 8080 +badger 2021/06/24 22:04:30 INFO: All 12 tables opened in 907ms +badger 2021/06/24 22:04:30 INFO: Replaying file id: 17 at offset: 64584535 +badger 2021/06/24 22:04:30 INFO: Replay took: 12.303µs +badger 2021/06/24 22:04:30 DEBUG: Value log discard stats empty +badger 2021/06/24 22:04:30 INFO: DB.Backup Created batch of size: 9.7 kB in 75.907µs. +badger 2021/06/24 22:04:31 INFO: DB.Backup Created batch of size: 4.3 MB in 8.003592ms. +.... +.... +badger 2021/06/24 22:04:31 INFO: DB.Backup Created batch of size: 30 MB in 74.808075ms. +badger 2021/06/24 22:04:36 INFO: DB.Backup Sent 15495232 keys +badger 2021/06/24 22:04:36 INFO: Got compaction priority: {level:0 score:1.73 dropPrefixes:[]} +``` + +This will create a badger.bak file in the current directory. + +5. Download, extract, and compile the source code of badger v3: https://github.com/dgraph-io/badger/archive/refs/tags/v3.2103.0.tar.gz + +```sh +❯ tar xvzf badger-3.2103.0.tar +❯ cd badger-3.2103.0/badger/ +❯ go install +``` + +This will install the badger command-line utility into your $GOBIN path, e.g. ~/go/bin/badger. + +6. Restore the data from the backup. + +```sh +❯ ~/go/bin/badger restore --dir jaeger-v3 +Listening for /debug HTTP requests at port: 8080 +jemalloc enabled: false +Using Go memory +badger 2021/06/24 22:08:29 INFO: All 0 tables opened in 0s +badger 2021/06/24 22:08:29 INFO: Discard stats nextEmptySlot: 0 +badger 2021/06/24 22:08:29 INFO: Set nextTxnTs to 0 +badger 2021/06/24 22:08:37 INFO: [0] [E] LOG Compact 0->6 (5, 0 -> 50 tables with 1 splits). [00001 00002 00003 00004 00005 . .]
-> [00006 00007 00008 00009 00010 00011 00012 00013 00014 00015 00016 00017 00018 00019 00020 00021 00022 00023 00024 00025 00026 00028 00029 00030 00031 00032 00033 00034 00035 00036 00037 00038 00039 00040 00041 00043 00044 00045 00046 00047 00048 00049 00050 00051 00052 00053 00054 00055 00056 00057 .], took 2.597s +badger 2021/06/24 22:08:53 INFO: Lifetime L0 stalled for: 0s +badger 2021/06/24 22:08:55 INFO: +Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB +Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB +Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB +Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB +Level 4 [B]: NumTables: 45. Size: 86 MiB of 10 MiB. Score: 8.64->10.21 StaleData: 0 B Target FileSize: 2.0 MiB +Level 5 [ ]: NumTables: 08. Size: 29 MiB of 34 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB +Level 6 [ ]: NumTables: 63. Size: 340 MiB of 340 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 8.0 MiB +Level Done +Num Allocated Bytes at program end: 0 B +``` + +This will restore the data in jaeger-v3 directory. It will look like this + +```sh +❯ ls ./jaeger-v3 +000001.vlog 000180.sst 000257.sst 000276.sst 000294.sst 000327.sst 000336.sst 000349.sst 000356.sst 000364.sst 000371.sst 000378.sst 000385.sst 000392.sst 000399.sst 000406.sst 000413.sst MANIFEST +000006.sst 000181.sst 000259.sst 000277.sst 000302.sst 000328.sst 000339.sst 000350.sst 000357.sst 000365.sst 000372.sst 000379.sst 000386.sst 000393.sst 000400.sst 000407.sst 000414.sst +000007.sst 000195.sst 000261.sst 000278.sst 000305.sst 000330.sst 000340.sst 000351.sst 000359.sst 000366.sst 000373.sst 000380.sst 000387.sst 000394.sst 000401.sst 000408.sst 000415.sst +000008.sst 000218.sst 000265.sst 000279.sst 000315.sst 000331.sst 000341.sst 000352.sst 000360.sst 000367.sst 000374.sst 000381.sst 000388.sst 000395.sst 000402.sst 000409.sst 000416.sst +000061.sst 000227.sst 000267.sst 000282.sst 000324.sst 000332.sst 000343.sst 000353.sst 000361.sst 000368.sst 000375.sst 000382.sst 000389.sst 000396.sst 000403.sst 000410.sst 000417.sst +000134.sst 000249.sst 000272.sst 000285.sst 000325.sst 000333.sst 000344.sst 000354.sst 000362.sst 000369.sst 000376.sst 000383.sst 000390.sst 000397.sst 000404.sst 000411.sst DISCARD +000154.sst 000255.sst 000275.sst 000289.sst 000326.sst 000334.sst 000348.sst 000355.sst 000363.sst 000370.sst 000377.sst 000384.sst 000391.sst 000398.sst 000405.sst 000412.sst KEYREGISTRY +``` + +7. Separate out the key and data directories. + +```sh +❯ rm -rf /tmp/badger +❯ mv ./jaeger-v3 /tmp/badger +❯ mkdir /tmp/badger/data /tmp/badger/key +❯ mv /tmp/badger/*.vlog /tmp/badger/data/ +❯ mv /tmp/badger/*.sst /tmp/badger/key/ +❯ mv /tmp/badger/MANIFEST /tmp/badger/DISCARD /tmp/badger/KEYREGISTRY /tmp/badger/key/ +``` + +8. Start Jaeger v1.24.0. It should start well. 
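Before pointing Jaeger at the migrated directories, it can be worth confirming that Badger v3 opens them with the key/value split from step 7. A small, optional sanity-check sketch (not part of the documented procedure; the paths assume the layout above):

```go
// Optional sanity check: open the migrated store with Badger v3 using the same
// key/value directory split that Jaeger expects
// (--badger.directory-key and --badger.directory-value).
package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v3"
)

func main() {
	opts := badger.DefaultOptions("/tmp/badger/key").
		WithDir("/tmp/badger/key").      // SST files (the "key" directory)
		WithValueDir("/tmp/badger/data") // vlog files (the "data" directory)

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatalf("migrated store failed to open: %v", err)
	}
	defer db.Close()

	// Iterate over a handful of keys just to prove the data is readable.
	count := 0
	err = db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()
		for it.Rewind(); it.Valid() && count < 10; it.Next() {
			count++
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("store opened, sampled %d keys\n", count)
}
```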
diff --git a/plugin/storage/badger/factory.go b/plugin/storage/badger/factory.go index 5b2d10b1278..1fc2e1dffac 100644 --- a/plugin/storage/badger/factory.go +++ b/plugin/storage/badger/factory.go @@ -50,6 +50,7 @@ var ( // interface comformance checks _ storage.Factory = (*Factory)(nil) _ io.Closer = (*Factory)(nil) _ plugin.Configurable = (*Factory)(nil) + _ storage.Purger = (*Factory)(nil) // TODO badger could implement archive storage // _ storage.ArchiveFactory = (*Factory)(nil) @@ -303,3 +304,12 @@ func (f *Factory) registerBadgerExpvarMetrics(metricsFactory metrics.Factory) { } }) } + +// Purge removes all data from the Factory's underlying Badger store. +// This function is intended for testing purposes only and should not be used in production environments. +// Calling Purge in production will result in permanent data loss. +func (f *Factory) Purge() error { + return f.store.Update(func(txn *badger.Txn) error { + return f.store.DropAll() + }) +} diff --git a/plugin/storage/cassandra/factory.go b/plugin/storage/cassandra/factory.go index fed7bb77c85..a761207e219 100644 --- a/plugin/storage/cassandra/factory.go +++ b/plugin/storage/cassandra/factory.go @@ -78,6 +78,24 @@ func NewFactory() *Factory { } } +// NewFactoryWithConfig initializes factory with Config. +func NewFactoryWithConfig( + cfg config.Configuration, + metricsFactory metrics.Factory, + logger *zap.Logger, +) (*Factory, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + f := NewFactory() + f.primaryConfig = &cfg + err := f.Initialize(metricsFactory, logger) + if err != nil { + return nil, err + } + return f, nil +} + // AddFlags implements plugin.Configurable func (f *Factory) AddFlags(flagSet *flag.FlagSet) { f.Options.AddFlags(flagSet) @@ -214,11 +232,19 @@ var _ io.Closer = (*Factory)(nil) // Close closes the resources held by the factory func (f *Factory) Close() error { - f.Options.Get(archiveStorageConfig) + if f.primarySession != nil { + f.primarySession.Close() + } + if f.archiveSession != nil { + f.archiveSession.Close() + } + + var errs []error if cfg := f.Options.Get(archiveStorageConfig); cfg != nil { - cfg.TLS.Close() + errs = append(errs, cfg.TLS.Close()) } - return f.Options.GetPrimary().TLS.Close() + errs = append(errs, f.Options.GetPrimary().TLS.Close()) + return errors.Join(errs...) 
} // PrimarySession is used from integration tests to clean database between tests diff --git a/plugin/storage/cassandra/factory_test.go b/plugin/storage/cassandra/factory_test.go index 0a5a52e2bb8..5f9fbb30757 100644 --- a/plugin/storage/cassandra/factory_test.go +++ b/plugin/storage/cassandra/factory_test.go @@ -25,6 +25,7 @@ import ( "go.uber.org/zap" "github.com/jaegertracing/jaeger/pkg/cassandra" + cassandraCfg "github.com/jaegertracing/jaeger/pkg/cassandra/config" "github.com/jaegertracing/jaeger/pkg/cassandra/mocks" "github.com/jaegertracing/jaeger/pkg/config" "github.com/jaegertracing/jaeger/pkg/metrics" @@ -64,6 +65,7 @@ func TestCassandraFactory(t *testing.T) { query = &mocks.Query{} ) session.On("Query", mock.AnythingOfType("string"), mock.Anything).Return(query) + session.On("Close").Return() query.On("Exec").Return(nil) f.primaryConfig = newMockSessionBuilder(session, nil) f.archiveConfig = newMockSessionBuilder(nil, errors.New("made-up error")) @@ -197,3 +199,36 @@ func TestInitFromOptions(t *testing.T) { assert.Equal(t, o.GetPrimary(), f.primaryConfig) assert.Equal(t, o.Get(archiveStorageConfig), f.archiveConfig) } + +func TestConfigurationValidation(t *testing.T) { + testCases := []struct { + name string + cfg cassandraCfg.Configuration + wantErr bool + }{ + { + name: "valid configuration", + cfg: cassandraCfg.Configuration{ + Servers: []string{"http://localhost:9200"}, + }, + wantErr: false, + }, + { + name: "missing servers", + cfg: cassandraCfg.Configuration{}, + wantErr: true, + }, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + err := test.cfg.Validate() + if test.wantErr { + require.Error(t, err) + _, err = NewFactoryWithConfig(test.cfg, metrics.NullFactory, zap.NewNop()) + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/plugin/storage/cassandra/schema/create.sh b/plugin/storage/cassandra/schema/create.sh index f3a6aaa390f..b53c15b3708 100755 --- a/plugin/storage/cassandra/schema/create.sh +++ b/plugin/storage/cassandra/schema/create.sh @@ -12,7 +12,7 @@ function usage { >&2 echo " DEPENDENCIES_TTL - time to live for dependencies data, in seconds (default: 0, no TTL)" >&2 echo " KEYSPACE - keyspace (default: jaeger_v1_{datacenter})" >&2 echo " REPLICATION_FACTOR - replication factor for prod (default: 2 for prod, 1 for test)" - >&2 echo " VERSION - Cassandra backend version, 3 or 4 (default: 4). Ignored if template is is provided." + >&2 echo " VERSION - Cassandra backend version, 3 or 4 (default: 4). Ignored if template is provided." >&2 echo "" >&2 echo "The template-file argument must be fully qualified path to a v00#.cql.tmpl template file." >&2 echo "If omitted, the template file with the highest available version will be used." 
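The new cassandra.NewFactoryWithConfig constructor added above lets embedders build a validated and initialized storage factory directly from a Configuration value instead of CLI flags. A minimal usage sketch (the server address and keyspace are placeholders, and a reachable Cassandra cluster is assumed):

```go
// Sketch: building the Cassandra storage factory from config, without CLI flags.
package main

import (
	"log"

	"go.uber.org/zap"

	"github.com/jaegertracing/jaeger/pkg/cassandra/config"
	"github.com/jaegertracing/jaeger/pkg/metrics"
	"github.com/jaegertracing/jaeger/plugin/storage/cassandra"
)

func main() {
	cfg := config.Configuration{
		Servers:  []string{"127.0.0.1"}, // placeholder; Validate() fails if empty
		Keyspace: "jaeger_v1_test",      // placeholder keyspace
	}

	// NewFactoryWithConfig validates the config and initializes the factory in one step;
	// it needs a reachable Cassandra cluster to succeed.
	f, err := cassandra.NewFactoryWithConfig(cfg, metrics.NullFactory, zap.NewNop())
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	writer, err := f.CreateSpanWriter()
	if err != nil {
		log.Fatal(err)
	}
	_ = writer // hand the writer to the collector pipeline
}
```

Note that Close now also shuts down the primary and archive sessions and joins any TLS cleanup errors, so deferring it is sufficient for cleanup.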
diff --git a/plugin/storage/es/factory.go b/plugin/storage/es/factory.go index 8ce0de4f386..e95be48177d 100644 --- a/plugin/storage/es/factory.go +++ b/plugin/storage/es/factory.go @@ -16,6 +16,7 @@ package es import ( + "context" "errors" "flag" "fmt" @@ -37,9 +38,11 @@ import ( "github.com/jaegertracing/jaeger/plugin" esDepStore "github.com/jaegertracing/jaeger/plugin/storage/es/dependencystore" "github.com/jaegertracing/jaeger/plugin/storage/es/mappings" + esSampleStore "github.com/jaegertracing/jaeger/plugin/storage/es/samplingstore" esSpanStore "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore" "github.com/jaegertracing/jaeger/storage" "github.com/jaegertracing/jaeger/storage/dependencystore" + "github.com/jaegertracing/jaeger/storage/samplingstore" "github.com/jaegertracing/jaeger/storage/spanstore" ) @@ -83,6 +86,39 @@ func NewFactory() *Factory { } } +func NewFactoryWithConfig( + cfg config.Configuration, + metricsFactory metrics.Factory, + logger *zap.Logger, +) (*Factory, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + + cfg.MaxDocCount = defaultMaxDocCount + cfg.Enabled = true + + archive := make(map[string]*namespaceConfig) + archive[archiveNamespace] = &namespaceConfig{ + Configuration: cfg, + namespace: archiveNamespace, + } + + f := NewFactory() + f.InitFromOptions(Options{ + Primary: namespaceConfig{ + Configuration: cfg, + namespace: primaryNamespace, + }, + others: archive, + }) + err := f.Initialize(metricsFactory, logger) + if err != nil { + return nil, err + } + return f, nil +} + // AddFlags implements plugin.Configurable func (f *Factory) AddFlags(flagSet *flag.FlagSet) { f.Options.AddFlags(flagSet) @@ -231,23 +267,6 @@ func createSpanWriter( return nil, err } - mappingBuilder := mappings.MappingBuilder{ - TemplateBuilder: es.TextTemplateBuilder{}, - Shards: cfg.NumShards, - Replicas: cfg.NumReplicas, - EsVersion: cfg.Version, - IndexPrefix: cfg.IndexPrefix, - UseILM: cfg.UseILM, - PrioritySpanTemplate: cfg.PrioritySpanTemplate, - PriorityServiceTemplate: cfg.PriorityServiceTemplate, - PriorityDependenciesTemplate: cfg.PriorityDependenciesTemplate, - } - - spanMapping, serviceMapping, err := mappingBuilder.GetSpanServiceMappings() - if err != nil { - return nil, err - } - writer := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{ Client: clientFn, IndexPrefix: cfg.IndexPrefix, @@ -264,14 +283,58 @@ func createSpanWriter( // Creating a template here would conflict with the one created for ILM resulting to no index rollover if cfg.CreateIndexTemplates && !cfg.UseILM { - err := writer.CreateTemplates(spanMapping, serviceMapping, cfg.IndexPrefix) + mappingBuilder := mappingBuilderFromConfig(cfg) + spanMapping, serviceMapping, err := mappingBuilder.GetSpanServiceMappings() if err != nil { return nil, err } + if err := writer.CreateTemplates(spanMapping, serviceMapping, cfg.IndexPrefix); err != nil { + return nil, err + } } return writer, nil } +func (f *Factory) CreateSamplingStore(maxBuckets int) (samplingstore.Store, error) { + params := esSampleStore.SamplingStoreParams{ + Client: f.getPrimaryClient, + Logger: f.logger, + IndexPrefix: f.primaryConfig.IndexPrefix, + IndexDateLayout: f.primaryConfig.IndexDateLayoutSampling, + IndexRolloverFrequency: f.primaryConfig.GetIndexRolloverFrequencySamplingDuration(), + Lookback: f.primaryConfig.AdaptiveSamplingLookback, + MaxDocCount: f.primaryConfig.MaxDocCount, + } + store := esSampleStore.NewSamplingStore(params) + + if f.primaryConfig.CreateIndexTemplates && !f.primaryConfig.UseILM 
{ + mappingBuilder := mappingBuilderFromConfig(f.primaryConfig) + samplingMapping, err := mappingBuilder.GetSamplingMappings() + if err != nil { + return nil, err + } + if _, err := f.getPrimaryClient().CreateTemplate(params.PrefixedIndexName()).Body(samplingMapping).Do(context.Background()); err != nil { + return nil, fmt.Errorf("failed to create template: %w", err) + } + } + + return store, nil +} + +func mappingBuilderFromConfig(cfg *config.Configuration) mappings.MappingBuilder { + return mappings.MappingBuilder{ + TemplateBuilder: es.TextTemplateBuilder{}, + Shards: cfg.NumShards, + Replicas: cfg.NumReplicas, + EsVersion: cfg.Version, + IndexPrefix: cfg.IndexPrefix, + UseILM: cfg.UseILM, + PrioritySpanTemplate: cfg.PrioritySpanTemplate, + PriorityServiceTemplate: cfg.PriorityServiceTemplate, + PriorityDependenciesTemplate: cfg.PriorityDependenciesTemplate, + } +} + func createDependencyReader( clientFn func() es.Client, cfg *config.Configuration, diff --git a/plugin/storage/es/factory_test.go b/plugin/storage/es/factory_test.go index d3e1d34ab4a..e072b5bf19b 100644 --- a/plugin/storage/es/factory_test.go +++ b/plugin/storage/es/factory_test.go @@ -106,6 +106,10 @@ func TestElasticsearchFactory(t *testing.T) { _, err = f.CreateArchiveSpanWriter() require.NoError(t, err) + + _, err = f.CreateSamplingStore(1) + require.NoError(t, err) + require.NoError(t, f.Close()) } @@ -208,9 +212,14 @@ func TestCreateTemplateError(t *testing.T) { err := f.Initialize(metrics.NullFactory, zap.NewNop()) require.NoError(t, err) defer f.Close() + w, err := f.CreateSpanWriter() assert.Nil(t, w) require.Error(t, err, "template-error") + + s, err := f.CreateSamplingStore(1) + assert.Nil(t, s) + require.Error(t, err, "template-error") } func TestILMDisableTemplateCreation(t *testing.T) { @@ -263,6 +272,65 @@ func TestInitFromOptions(t *testing.T) { assert.Equal(t, o.Get(archiveNamespace), f.archiveConfig) } +func TestESStorageFactoryWithConfig(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(mockEsServerResponse) + })) + defer server.Close() + cfg := escfg.Configuration{ + Servers: []string{server.URL}, + LogLevel: "error", + } + factory, err := NewFactoryWithConfig(cfg, metrics.NullFactory, zap.NewNop()) + require.NoError(t, err) + defer factory.Close() +} + +func TestConfigurationValidation(t *testing.T) { + testCases := []struct { + name string + cfg escfg.Configuration + wantErr bool + }{ + { + name: "valid configuration", + cfg: escfg.Configuration{ + Servers: []string{"http://localhost:9200"}, + }, + wantErr: false, + }, + { + name: "missing servers", + cfg: escfg.Configuration{}, + wantErr: true, + }, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + err := test.cfg.Validate() + if test.wantErr { + require.Error(t, err) + _, err = NewFactoryWithConfig(test.cfg, metrics.NullFactory, zap.NewNop()) + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestESStorageFactoryWithConfigError(t *testing.T) { + defer testutils.VerifyGoLeaksOnce(t) + + cfg := escfg.Configuration{ + Servers: []string{"http://127.0.0.1:65535"}, + LogLevel: "error", + } + _, err := NewFactoryWithConfig(cfg, metrics.NullFactory, zap.NewNop()) + require.Error(t, err) + require.ErrorContains(t, err, "failed to create primary Elasticsearch client") +} + func TestPasswordFromFile(t *testing.T) { defer testutils.VerifyGoLeaksOnce(t) t.Run("primary client", func(t *testing.T) { diff --git 
a/plugin/storage/es/mappings/fixtures/jaeger-sampling-7.json b/plugin/storage/es/mappings/fixtures/jaeger-sampling-7.json new file mode 100644 index 00000000000..910a6ca8675 --- /dev/null +++ b/plugin/storage/es/mappings/fixtures/jaeger-sampling-7.json @@ -0,0 +1,17 @@ +{ + "index_patterns": "*jaeger-sampling-*", + "aliases": { + "test-jaeger-sampling-read" : {} + }, + "settings":{ + "index.number_of_shards": 3, + "index.number_of_replicas": 3, + "index.mapping.nested_fields.limit":50, + "index.requests.cache.enable":true + ,"lifecycle": { + "name": "jaeger-test-policy", + "rollover_alias": "test-jaeger-sampling-write" + } + }, + "mappings":{} +} diff --git a/plugin/storage/es/mappings/fixtures/jaeger-sampling-8.json b/plugin/storage/es/mappings/fixtures/jaeger-sampling-8.json new file mode 100644 index 00000000000..e8fbbbf4082 --- /dev/null +++ b/plugin/storage/es/mappings/fixtures/jaeger-sampling-8.json @@ -0,0 +1,20 @@ +{ + "priority": 503, + "index_patterns": "test-jaeger-sampling-*", + "template": { + "aliases": { + "test-jaeger-sampling-read": {} + }, + "settings": { + "index.number_of_shards": 3, + "index.number_of_replicas": 3, + "index.mapping.nested_fields.limit": 50, + "index.requests.cache.enable": true, + "lifecycle": { + "name": "jaeger-test-policy", + "rollover_alias": "test-jaeger-sampling-write" + } + }, + "mappings": {} + } +} diff --git a/plugin/storage/es/mappings/fixtures/jaeger-sampling.json b/plugin/storage/es/mappings/fixtures/jaeger-sampling.json new file mode 100644 index 00000000000..87304011017 --- /dev/null +++ b/plugin/storage/es/mappings/fixtures/jaeger-sampling.json @@ -0,0 +1,10 @@ +{ + "template": "*jaeger-sampling-*", + "settings":{ + "index.number_of_shards": 3, + "index.number_of_replicas": 3, + "index.mapping.nested_fields.limit":50, + "index.requests.cache.enable":true + }, + "mappings":{} +} diff --git a/plugin/storage/es/mappings/jaeger-sampling-7.json b/plugin/storage/es/mappings/jaeger-sampling-7.json new file mode 100644 index 00000000000..167c1d47928 --- /dev/null +++ b/plugin/storage/es/mappings/jaeger-sampling-7.json @@ -0,0 +1,21 @@ +{ + "index_patterns": "*jaeger-sampling-*", + {{- if .UseILM }} + "aliases": { + "{{ .IndexPrefix }}jaeger-sampling-read" : {} + }, + {{- end }} + "settings":{ + "index.number_of_shards": {{ .Shards }}, + "index.number_of_replicas": {{ .Replicas }}, + "index.mapping.nested_fields.limit":50, + "index.requests.cache.enable":false + {{- if .UseILM }} + ,"lifecycle": { + "name": "{{ .ILMPolicyName }}", + "rollover_alias": "{{ .IndexPrefix }}jaeger-sampling-write" + } + {{- end }} + }, + "mappings":{} +} diff --git a/plugin/storage/es/mappings/jaeger-sampling-8.json b/plugin/storage/es/mappings/jaeger-sampling-8.json new file mode 100644 index 00000000000..0667520803a --- /dev/null +++ b/plugin/storage/es/mappings/jaeger-sampling-8.json @@ -0,0 +1,24 @@ +{ + "priority": {{ .PrioritySamplingTemplate }}, + "index_patterns": "{{ .IndexPrefix }}jaeger-sampling-*", + "template": { + {{- if .UseILM }} + "aliases": { + "{{ .IndexPrefix }}jaeger-sampling-read": {} + }, + {{- end }} + "settings": { + "index.number_of_shards": {{ .Shards }}, + "index.number_of_replicas": {{ .Replicas }}, + "index.mapping.nested_fields.limit": 50, + "index.requests.cache.enable": false + {{- if .UseILM }}, + "lifecycle": { + "name": "{{ .ILMPolicyName }}", + "rollover_alias": "{{ .IndexPrefix }}jaeger-sampling-write" + } + {{- end }} + }, + "mappings": {} + } +} diff --git a/plugin/storage/es/mappings/jaeger-sampling.json 
b/plugin/storage/es/mappings/jaeger-sampling.json new file mode 100644 index 00000000000..458d490a357 --- /dev/null +++ b/plugin/storage/es/mappings/jaeger-sampling.json @@ -0,0 +1,10 @@ +{ + "template": "*jaeger-sampling-*", + "settings":{ + "index.number_of_shards": {{ .Shards }}, + "index.number_of_replicas": {{ .Replicas }}, + "index.mapping.nested_fields.limit":50, + "index.requests.cache.enable":false + }, + "mappings":{} +} diff --git a/plugin/storage/es/mappings/mapping.go b/plugin/storage/es/mappings/mapping.go index 9ee624120db..5da255f8838 100644 --- a/plugin/storage/es/mappings/mapping.go +++ b/plugin/storage/es/mappings/mapping.go @@ -35,6 +35,7 @@ type MappingBuilder struct { PrioritySpanTemplate int64 PriorityServiceTemplate int64 PriorityDependenciesTemplate int64 + PrioritySamplingTemplate int64 EsVersion uint IndexPrefix string UseILM bool @@ -69,6 +70,11 @@ func (mb *MappingBuilder) GetDependenciesMappings() (string, error) { return mb.GetMapping("jaeger-dependencies") } +// GetSamplingMappings returns sampling mappings +func (mb *MappingBuilder) GetSamplingMappings() (string, error) { + return mb.GetMapping("jaeger-sampling") +} + func loadMapping(name string) string { s, _ := MAPPINGS.ReadFile(name) return string(s) diff --git a/plugin/storage/es/mappings/mapping_test.go b/plugin/storage/es/mappings/mapping_test.go index 562c8400f83..167c6893d1b 100644 --- a/plugin/storage/es/mappings/mapping_test.go +++ b/plugin/storage/es/mappings/mapping_test.go @@ -332,6 +332,19 @@ func TestMappingBuilder_GetDependenciesMappings(t *testing.T) { require.EqualError(t, err, "template load error") } +func TestMappingBuilder_GetSamplingMappings(t *testing.T) { + tb := mocks.TemplateBuilder{} + ta := mocks.TemplateApplier{} + ta.On("Execute", mock.Anything, mock.Anything).Return(errors.New("template load error")) + tb.On("Parse", mock.Anything).Return(&ta, nil) + + mappingBuilder := MappingBuilder{ + TemplateBuilder: &tb, + } + _, err := mappingBuilder.GetSamplingMappings() + require.EqualError(t, err, "template load error") +} + func TestMain(m *testing.M) { testutils.VerifyGoLeaks(m) } diff --git a/plugin/storage/es/options.go b/plugin/storage/es/options.go index 406da2c313e..efdd1c6fd9b 100644 --- a/plugin/storage/es/options.go +++ b/plugin/storage/es/options.go @@ -38,6 +38,7 @@ const ( suffixServerURLs = ".server-urls" suffixRemoteReadClusters = ".remote-read-clusters" suffixMaxSpanAge = ".max-span-age" + suffixAdaptiveSamplingLookback = ".adaptive-sampling.lookback" suffixNumShards = ".num-shards" suffixNumReplicas = ".num-replicas" suffixPrioritySpanTemplate = ".prioirity-span-template" @@ -52,6 +53,7 @@ const ( suffixIndexDateSeparator = ".index-date-separator" suffixIndexRolloverFrequencySpans = ".index-rollover-frequency-spans" suffixIndexRolloverFrequencyServices = ".index-rollover-frequency-services" + suffixIndexRolloverFrequencySampling = ".index-rollover-frequency-adaptive-sampling" suffixTagsAsFields = ".tags-as-fields" suffixTagsAsFieldsAll = suffixTagsAsFields + ".all" suffixTagsAsFieldsInclude = suffixTagsAsFields + ".include" @@ -101,6 +103,7 @@ func NewOptions(primaryNamespace string, otherNamespaces ...string) *Options { Password: "", Sniffer: false, MaxSpanAge: 72 * time.Hour, + AdaptiveSamplingLookback: 72 * time.Hour, NumShards: 5, NumReplicas: 1, PrioritySpanTemplate: 0, @@ -244,6 +247,11 @@ func addFlags(flagSet *flag.FlagSet, nsConfig *namespaceConfig) { defaultIndexRolloverFrequency, "Rotates jaeger-service indices over the given period. 
For example \"day\" creates \"jaeger-service-yyyy-MM-dd\" every day after UTC 12AM. Valid options: [hour, day]. "+ "This does not delete old indices. For details on complete index management solutions supported by Jaeger, refer to: https://www.jaegertracing.io/docs/deployment/#elasticsearch-rollover") + flagSet.String( + nsConfig.namespace+suffixIndexRolloverFrequencySampling, + defaultIndexRolloverFrequency, + "Rotates jaeger-sampling indices over the given period. For example \"day\" creates \"jaeger-sampling-yyyy-MM-dd\" every day after UTC 12AM. Valid options: [hour, day]. "+ + "This does not delete old indices. For details on complete index management solutions supported by Jaeger, refer to: https://www.jaegertracing.io/docs/deployment/#elasticsearch-rollover") flagSet.Bool( nsConfig.namespace+suffixTagsAsFieldsAll, nsConfig.Tags.AllAsFields, @@ -296,7 +304,10 @@ func addFlags(flagSet *flag.FlagSet, nsConfig *namespaceConfig) { nsConfig.namespace+suffixSendGetBodyAs, nsConfig.SendGetBodyAs, "HTTP verb for requests that contain a body [GET, POST].") - + flagSet.Duration( + nsConfig.namespace+suffixAdaptiveSamplingLookback, + nsConfig.AdaptiveSamplingLookback, + "How far back to look for the latest adaptive sampling probabilities") if nsConfig.namespace == archiveNamespace { flagSet.Bool( nsConfig.namespace+suffixEnabled, @@ -330,6 +341,7 @@ func initFromViper(cfg *namespaceConfig, v *viper.Viper) { cfg.SnifferTLSEnabled = v.GetBool(cfg.namespace + suffixSnifferTLSEnabled) cfg.Servers = strings.Split(stripWhiteSpace(v.GetString(cfg.namespace+suffixServerURLs)), ",") cfg.MaxSpanAge = v.GetDuration(cfg.namespace + suffixMaxSpanAge) + cfg.AdaptiveSamplingLookback = v.GetDuration(cfg.namespace + suffixAdaptiveSamplingLookback) cfg.NumShards = v.GetInt64(cfg.namespace + suffixNumShards) cfg.NumReplicas = v.GetInt64(cfg.namespace + suffixNumReplicas) cfg.PrioritySpanTemplate = v.GetInt64(cfg.namespace + suffixPrioritySpanTemplate) @@ -365,14 +377,15 @@ func initFromViper(cfg *namespaceConfig, v *viper.Viper) { cfg.IndexRolloverFrequencySpans = strings.ToLower(v.GetString(cfg.namespace + suffixIndexRolloverFrequencySpans)) cfg.IndexRolloverFrequencyServices = strings.ToLower(v.GetString(cfg.namespace + suffixIndexRolloverFrequencyServices)) + cfg.IndexRolloverFrequencySampling = strings.ToLower(v.GetString(cfg.namespace + suffixIndexRolloverFrequencySampling)) separator := v.GetString(cfg.namespace + suffixIndexDateSeparator) cfg.IndexDateLayoutSpans = initDateLayout(cfg.IndexRolloverFrequencySpans, separator) cfg.IndexDateLayoutServices = initDateLayout(cfg.IndexRolloverFrequencyServices, separator) + cfg.IndexDateLayoutSampling = initDateLayout(cfg.IndexRolloverFrequencySampling, separator) // Dependencies calculation should be daily, and this index size is very small cfg.IndexDateLayoutDependencies = initDateLayout(defaultIndexRolloverFrequency, separator) - var err error cfg.TLS, err = cfg.getTLSFlagsConfig().InitFromViper(v) if err != nil { diff --git a/plugin/storage/es/samplingstore/dbmodel/converter.go b/plugin/storage/es/samplingstore/dbmodel/converter.go new file mode 100644 index 00000000000..259bc5cbc36 --- /dev/null +++ b/plugin/storage/es/samplingstore/dbmodel/converter.go @@ -0,0 +1,51 @@ +// Copyright (c) 2024 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +import ( + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" +) + +func FromThroughputs(throughputs []*model.Throughput) []Throughput { + if throughputs == nil { + return nil + } + ret := make([]Throughput, len(throughputs)) + for i, d := range throughputs { + ret[i] = Throughput{ + Service: d.Service, + Operation: d.Operation, + Count: d.Count, + Probabilities: d.Probabilities, + } + } + return ret +} + +func ToThroughputs(throughputs []Throughput) []*model.Throughput { + if throughputs == nil { + return nil + } + ret := make([]*model.Throughput, len(throughputs)) + for i, d := range throughputs { + ret[i] = &model.Throughput{ + Service: d.Service, + Operation: d.Operation, + Count: d.Count, + Probabilities: d.Probabilities, + } + } + return ret +} diff --git a/plugin/storage/es/samplingstore/dbmodel/converter_test.go b/plugin/storage/es/samplingstore/dbmodel/converter_test.go new file mode 100644 index 00000000000..9ac02f1eac8 --- /dev/null +++ b/plugin/storage/es/samplingstore/dbmodel/converter_test.go @@ -0,0 +1,53 @@ +// Copyright (c) 2024 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" + "github.com/jaegertracing/jaeger/pkg/testutils" +) + +func TestConvertDependencies(t *testing.T) { + tests := []struct { + throughputs []*model.Throughput + }{ + { + throughputs: []*model.Throughput{{Service: "service1", Operation: "operation1", Count: 10, Probabilities: map[string]struct{}{"new-srv": {}}}}, + }, + { + throughputs: []*model.Throughput{}, + }, + { + throughputs: nil, + }, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + got := FromThroughputs(test.throughputs) + a := ToThroughputs(got) + assert.Equal(t, test.throughputs, a) + }) + } +} + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/plugin/storage/es/samplingstore/dbmodel/model.go b/plugin/storage/es/samplingstore/dbmodel/model.go new file mode 100644 index 00000000000..8205dc6415b --- /dev/null +++ b/plugin/storage/es/samplingstore/dbmodel/model.go @@ -0,0 +1,42 @@ +// Copyright (c) 2024 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbmodel + +import ( + "time" +) + +type Throughput struct { + Service string + Operation string + Count int64 + Probabilities map[string]struct{} +} + +type TimeThroughput struct { + Timestamp time.Time `json:"timestamp"` + Throughput Throughput `json:"throughputs"` +} + +type ProbabilitiesAndQPS struct { + Hostname string + Probabilities map[string]map[string]float64 + QPS map[string]map[string]float64 +} + +type TimeProbabilitiesAndQPS struct { + Timestamp time.Time `json:"timestamp"` + ProbabilitiesAndQPS ProbabilitiesAndQPS `json:"probabilitiesandqps"` +} diff --git a/plugin/storage/es/samplingstore/storage.go b/plugin/storage/es/samplingstore/storage.go new file mode 100644 index 00000000000..9e0cf8750d3 --- /dev/null +++ b/plugin/storage/es/samplingstore/storage.go @@ -0,0 +1,211 @@ +// Copyright (c) 2024 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package samplingstore + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/olivere/elastic" + "go.uber.org/zap" + + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" + "github.com/jaegertracing/jaeger/pkg/es" + "github.com/jaegertracing/jaeger/plugin/storage/es/samplingstore/dbmodel" +) + +const ( + samplingIndex = "jaeger-sampling" + throughputType = "throughput-sampling" + probabilitiesType = "probabilities-sampling" + indexPrefixSeparator = "-" +) + +type SamplingStore struct { + client func() es.Client + logger *zap.Logger + samplingIndexPrefix string + indexDateLayout string + maxDocCount int + indexRolloverFrequency time.Duration + lookback time.Duration +} + +type SamplingStoreParams struct { + Client func() es.Client + Logger *zap.Logger + IndexPrefix string + IndexDateLayout string + IndexRolloverFrequency time.Duration + Lookback time.Duration + MaxDocCount int +} + +func NewSamplingStore(p SamplingStoreParams) *SamplingStore { + return &SamplingStore{ + client: p.Client, + logger: p.Logger, + samplingIndexPrefix: p.PrefixedIndexName() + indexPrefixSeparator, + indexDateLayout: p.IndexDateLayout, + maxDocCount: p.MaxDocCount, + indexRolloverFrequency: p.IndexRolloverFrequency, + lookback: p.Lookback, + } +} + +func (s *SamplingStore) InsertThroughput(throughput []*model.Throughput) error { + ts := time.Now() + indexName := indexWithDate(s.samplingIndexPrefix, s.indexDateLayout, ts) + for _, eachThroughput := range dbmodel.FromThroughputs(throughput) { + s.client().Index().Index(indexName).Type(throughputType). 
+ BodyJson(&dbmodel.TimeThroughput{ + Timestamp: ts, + Throughput: eachThroughput, + }).Add() + } + return nil +} + +func (s *SamplingStore) GetThroughput(start, end time.Time) ([]*model.Throughput, error) { + ctx := context.Background() + indices := getReadIndices(s.samplingIndexPrefix, s.indexDateLayout, start, end, s.indexRolloverFrequency) + searchResult, err := s.client().Search(indices...). + Size(s.maxDocCount). + Query(buildTSQuery(start, end)). + IgnoreUnavailable(true). + Do(ctx) + if err != nil { + return nil, fmt.Errorf("failed to search for throughputs: %w", err) + } + output := make([]dbmodel.TimeThroughput, len(searchResult.Hits.Hits)) + for i, hit := range searchResult.Hits.Hits { + if err := json.Unmarshal(*hit.Source, &output[i]); err != nil { + return nil, fmt.Errorf("unmarshalling documents failed: %w", err) + } + } + outThroughputs := make([]dbmodel.Throughput, len(output)) + for i, out := range output { + outThroughputs[i] = out.Throughput + } + return dbmodel.ToThroughputs(outThroughputs), nil +} + +func (s *SamplingStore) InsertProbabilitiesAndQPS(hostname string, + probabilities model.ServiceOperationProbabilities, + qps model.ServiceOperationQPS, +) error { + ts := time.Now() + writeIndexName := indexWithDate(s.samplingIndexPrefix, s.indexDateLayout, ts) + val := dbmodel.ProbabilitiesAndQPS{ + Hostname: hostname, + Probabilities: probabilities, + QPS: qps, + } + s.writeProbabilitiesAndQPS(writeIndexName, ts, val) + return nil +} + +func (s *SamplingStore) GetLatestProbabilities() (model.ServiceOperationProbabilities, error) { + ctx := context.Background() + clientFn := s.client() + indices, err := getLatestIndices(s.samplingIndexPrefix, s.indexDateLayout, clientFn, s.indexRolloverFrequency, s.lookback) + if err != nil { + return nil, fmt.Errorf("failed to get latest indices: %w", err) + } + searchResult, err := clientFn.Search(indices...). + Size(s.maxDocCount). + IgnoreUnavailable(true). + Do(ctx) + if err != nil { + return nil, fmt.Errorf("failed to search for Latest Probabilities: %w", err) + } + lengthOfSearchResult := len(searchResult.Hits.Hits) + if lengthOfSearchResult == 0 { + return nil, nil + } + + var latestProbabilities dbmodel.TimeProbabilitiesAndQPS + latestTime := time.Time{} + for _, hit := range searchResult.Hits.Hits { + var data dbmodel.TimeProbabilitiesAndQPS + if err = json.Unmarshal(*hit.Source, &data); err != nil { + return nil, fmt.Errorf("unmarshalling documents failed: %w", err) + } + if data.Timestamp.After(latestTime) { + latestTime = data.Timestamp + latestProbabilities = data + } + } + return latestProbabilities.ProbabilitiesAndQPS.Probabilities, nil +} + +func (s *SamplingStore) writeProbabilitiesAndQPS(indexName string, ts time.Time, pandqps dbmodel.ProbabilitiesAndQPS) { + s.client().Index().Index(indexName).Type(probabilitiesType). 
+		BodyJson(&dbmodel.TimeProbabilitiesAndQPS{
+			Timestamp:           ts,
+			ProbabilitiesAndQPS: pandqps,
+		}).Add()
+}
+
+func getLatestIndices(indexPrefix, indexDateLayout string, clientFn es.Client, rollover time.Duration, maxDuration time.Duration) ([]string, error) {
+	ctx := context.Background()
+	now := time.Now().UTC()
+	earliest := now.Add(-maxDuration)
+	earliestIndex := indexWithDate(indexPrefix, indexDateLayout, earliest)
+	for {
+		currentIndex := indexWithDate(indexPrefix, indexDateLayout, now)
+		exists, err := clientFn.IndexExists(currentIndex).Do(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to check index existence: %w", err)
+		}
+		if exists {
+			return []string{currentIndex}, nil
+		}
+		if currentIndex == earliestIndex {
+			return nil, fmt.Errorf("failed to find latest index")
+		}
+		now = now.Add(rollover) // rollover is negative
+	}
+}
+
+func getReadIndices(indexName, indexDateLayout string, startTime time.Time, endTime time.Time, rollover time.Duration) []string {
+	var indices []string
+	firstIndex := indexWithDate(indexName, indexDateLayout, startTime)
+	currentIndex := indexWithDate(indexName, indexDateLayout, endTime)
+	for currentIndex != firstIndex {
+		indices = append(indices, currentIndex)
+		endTime = endTime.Add(rollover) // rollover is negative
+		currentIndex = indexWithDate(indexName, indexDateLayout, endTime)
+	}
+	indices = append(indices, firstIndex)
+	return indices
+}
+
+func (p *SamplingStoreParams) PrefixedIndexName() string {
+	if p.IndexPrefix != "" {
+		return p.IndexPrefix + indexPrefixSeparator + samplingIndex
+	}
+	return samplingIndex
+}
+
+func buildTSQuery(start, end time.Time) elastic.Query {
+	return elastic.NewRangeQuery("timestamp").Gte(start).Lte(end)
+}
+
+func indexWithDate(indexNamePrefix, indexDateLayout string, date time.Time) string {
+	return indexNamePrefix + date.UTC().Format(indexDateLayout)
+}
diff --git a/plugin/storage/es/samplingstore/storage_test.go b/plugin/storage/es/samplingstore/storage_test.go
new file mode 100644
index 00000000000..f0d171119c0
--- /dev/null
+++ b/plugin/storage/es/samplingstore/storage_test.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2024 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package samplingstore
+
+import (
+	"encoding/json"
+	"errors"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/olivere/elastic"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+
+	samplemodel "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model"
+	"github.com/jaegertracing/jaeger/pkg/es"
+	"github.com/jaegertracing/jaeger/pkg/es/mocks"
+	"github.com/jaegertracing/jaeger/pkg/testutils"
+	"github.com/jaegertracing/jaeger/plugin/storage/es/samplingstore/dbmodel"
+)
+
+const defaultMaxDocCount = 10_000
+
+type samplingStorageTest struct {
+	client    *mocks.Client
+	logger    *zap.Logger
+	logBuffer *testutils.Buffer
+	storage   *SamplingStore
+}
+
+func withEsSampling(indexPrefix, indexDateLayout string, maxDocCount int, fn func(w *samplingStorageTest)) {
+	client := &mocks.Client{}
+	logger, logBuffer := testutils.NewLogger()
+	w := &samplingStorageTest{
+		client:    client,
+		logger:    logger,
+		logBuffer: logBuffer,
+		storage: NewSamplingStore(SamplingStoreParams{
+			Client:          func() es.Client { return client },
+			Logger:          logger,
+			IndexPrefix:     indexPrefix,
+			IndexDateLayout: indexDateLayout,
+			MaxDocCount:     maxDocCount,
+		}),
+	}
+	fn(w)
+}
+
+func TestNewIndexPrefix(t *testing.T) {
+	tests := []struct {
+		name     string
+		prefix   string
+		expected string
+	}{
+		{
+			name:     "without prefix",
+			prefix:   "",
+			expected: "",
+		},
+		{
+			name:     "with prefix",
+			prefix:   "foo",
+			expected: "foo-",
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			client := &mocks.Client{}
+			r := NewSamplingStore(SamplingStoreParams{
+				Client:          func() es.Client { return client },
+				Logger:          zap.NewNop(),
+				IndexPrefix:     test.prefix,
+				IndexDateLayout: "2006-01-02",
+				MaxDocCount:     defaultMaxDocCount,
+			})
+			assert.Equal(t, test.expected+samplingIndex+indexPrefixSeparator, r.samplingIndexPrefix)
+		})
+	}
+}
+
+func TestGetReadIndices(t *testing.T) {
+	test := struct {
+		name  string
+		start time.Time
+		end   time.Time
+	}{
+		name:  "",
+		start: time.Date(2024, time.February, 10, 0, 0, 0, 0, time.UTC),
+		end:   time.Date(2024, time.February, 12, 0, 0, 0, 0, time.UTC),
+	}
+	t.Run(test.name, func(t *testing.T) {
+		expectedIndices := []string{
+			"prefix-jaeger-sampling-2024-02-12",
+			"prefix-jaeger-sampling-2024-02-11",
+			"prefix-jaeger-sampling-2024-02-10",
+		}
+		rollover := -time.Hour * 24
+		indices := getReadIndices("prefix-jaeger-sampling-", "2006-01-02", test.start, test.end, rollover)
+		assert.Equal(t, expectedIndices, indices)
+	})
+}
+
+func TestGetLatestIndices(t *testing.T) {
+	tests := []struct {
+		name            string
+		indexDateLayout string
+		maxDuration     time.Duration
+		expectedIndices []string
+		expectedError   string
+		IndexExistError error
+		indexExist      bool
+	}{
+		{
+			name:            "with index",
+			indexDateLayout: "2006-01-02",
+			maxDuration:     24 * time.Hour,
+			expectedIndices: []string{indexWithDate("", "2006-01-02", time.Now().UTC())},
+			expectedError:   "",
+			indexExist:      true,
+		},
+		{
+			name:            "without index",
+			indexDateLayout: "2006-01-02",
+			maxDuration:     72 * time.Hour,
+			expectedError:   "failed to find latest index",
+			indexExist:      false,
+		},
+		{
+			name:            "check index existence",
+			indexDateLayout: "2006-01-02",
+			maxDuration:     24 * time.Hour,
+			expectedError:   "failed to check index existence: fail",
+			indexExist:      true,
+			IndexExistError: errors.New("fail"),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			withEsSampling("", test.indexDateLayout, defaultMaxDocCount, func(w
*samplingStorageTest) { + indexService := &mocks.IndicesExistsService{} + w.client.On("IndexExists", mock.Anything).Return(indexService) + indexService.On("Do", mock.Anything).Return(test.indexExist, test.IndexExistError) + clientFnMock := w.storage.client() + actualIndices, err := getLatestIndices("", test.indexDateLayout, clientFnMock, -24*time.Hour, test.maxDuration) + if test.expectedError != "" { + require.EqualError(t, err, test.expectedError) + assert.Nil(t, actualIndices) + } else { + require.NoError(t, err) + require.Equal(t, test.expectedIndices, actualIndices) + } + }) + }) + } +} + +func TestInsertThroughput(t *testing.T) { + test := struct { + name string + expectedError string + }{ + name: "insert throughput", + expectedError: "", + } + + t.Run(test.name, func(t *testing.T) { + withEsSampling("", "2006-01-02", defaultMaxDocCount, func(w *samplingStorageTest) { + throughputs := []*samplemodel.Throughput{ + {Service: "my-svc", Operation: "op"}, + {Service: "our-svc", Operation: "op2"}, + } + fixedTime := time.Now() + indexName := indexWithDate("", "2006-01-02", fixedTime) + writeService := &mocks.IndexService{} + w.client.On("Index").Return(writeService) + writeService.On("Index", stringMatcher(indexName)).Return(writeService) + writeService.On("Type", stringMatcher(throughputType)).Return(writeService) + writeService.On("BodyJson", mock.Anything).Return(writeService) + writeService.On("Add", mock.Anything) + err := w.storage.InsertThroughput(throughputs) + if test.expectedError != "" { + require.EqualError(t, err, test.expectedError) + } else { + require.NoError(t, err) + } + }) + }) +} + +func TestInsertProbabilitiesAndQPS(t *testing.T) { + test := struct { + name string + expectedError string + }{ + name: "insert probabilities and qps", + expectedError: "", + } + + t.Run(test.name, func(t *testing.T) { + withEsSampling("", "2006-01-02", defaultMaxDocCount, func(w *samplingStorageTest) { + pAQ := dbmodel.ProbabilitiesAndQPS{ + Hostname: "dell11eg843d", + Probabilities: samplemodel.ServiceOperationProbabilities{"new-srv": {"op": 0.1}}, + QPS: samplemodel.ServiceOperationQPS{"new-srv": {"op": 4}}, + } + fixedTime := time.Now() + indexName := indexWithDate("", "2006-01-02", fixedTime) + writeService := &mocks.IndexService{} + w.client.On("Index").Return(writeService) + writeService.On("Index", stringMatcher(indexName)).Return(writeService) + writeService.On("Type", stringMatcher(probabilitiesType)).Return(writeService) + writeService.On("BodyJson", mock.Anything).Return(writeService) + writeService.On("Add", mock.Anything) + err := w.storage.InsertProbabilitiesAndQPS(pAQ.Hostname, pAQ.Probabilities, pAQ.QPS) + if test.expectedError != "" { + require.EqualError(t, err, test.expectedError) + } else { + require.NoError(t, err) + } + }) + }) +} + +func TestGetThroughput(t *testing.T) { + mockIndex := "jaeger-sampling-" + time.Now().UTC().Format("2006-01-02") + goodThroughputs := `{ + "timestamp": "2024-02-08T12:00:00Z", + "throughputs": { + "Service": "my-svc", + "Operation": "op", + "Count": 10 + } + }` + tests := []struct { + name string + searchResult *elastic.SearchResult + searchError error + expectedError string + expectedOutput []*samplemodel.Throughput + indexPrefix string + maxDocCount int + index string + }{ + { + name: "good throughputs without prefix", + searchResult: createSearchResult(goodThroughputs), + expectedOutput: []*samplemodel.Throughput{ + { + Service: "my-svc", + Operation: "op", + Count: 10, + }, + }, + index: mockIndex, + maxDocCount: 1000, + }, + { + name: 
"good throughputs without prefix", + searchResult: createSearchResult(goodThroughputs), + expectedOutput: []*samplemodel.Throughput{ + { + Service: "my-svc", + Operation: "op", + Count: 10, + }, + }, + index: mockIndex, + maxDocCount: 1000, + }, + { + name: "good throughputs with prefix", + searchResult: createSearchResult(goodThroughputs), + expectedOutput: []*samplemodel.Throughput{ + { + Service: "my-svc", + Operation: "op", + Count: 10, + }, + }, + index: mockIndex, + indexPrefix: "foo", + maxDocCount: 1000, + }, + { + name: "bad throughputs", + searchResult: createSearchResult(`badJson{hello}world`), + expectedError: "unmarshalling documents failed: invalid character 'b' looking for beginning of value", + index: mockIndex, + }, + { + name: "search fails", + searchError: errors.New("search failure"), + expectedError: "failed to search for throughputs: search failure", + index: mockIndex, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + withEsSampling(test.indexPrefix, "2006-01-02", defaultMaxDocCount, func(w *samplingStorageTest) { + searchService := &mocks.SearchService{} + if test.indexPrefix != "" { + test.indexPrefix += "-" + } + index := test.indexPrefix + test.index + w.client.On("Search", index).Return(searchService) + searchService.On("Size", mock.Anything).Return(searchService) + searchService.On("Query", mock.Anything).Return(searchService) + searchService.On("IgnoreUnavailable", true).Return(searchService) + searchService.On("Do", mock.Anything).Return(test.searchResult, test.searchError) + + actual, err := w.storage.GetThroughput(time.Now().Add(-time.Minute), time.Now()) + if test.expectedError != "" { + require.EqualError(t, err, test.expectedError) + assert.Nil(t, actual) + } else { + require.NoError(t, err) + assert.EqualValues(t, test.expectedOutput, actual) + } + }) + }) + } +} + +func TestGetLatestProbabilities(t *testing.T) { + mockIndex := "jaeger-sampling-" + time.Now().UTC().Format("2006-01-02") + goodProbabilities := `{ + "timestamp": "2024-02-08T12:00:00Z", + "probabilitiesandqps": { + "Hostname": "dell11eg843d", + "Probabilities": { + "new-srv": {"op": 0.1} + }, + "QPS": { + "new-srv": {"op": 4} + } + } + }` + tests := []struct { + name string + searchResult *elastic.SearchResult + searchError error + expectedOutput samplemodel.ServiceOperationProbabilities + expectedError string + maxDocCount int + index string + indexPresent bool + indexError error + indexPrefix string + }{ + { + name: "good probabilities without prefix", + searchResult: createSearchResult(goodProbabilities), + expectedOutput: samplemodel.ServiceOperationProbabilities{ + "new-srv": { + "op": 0.1, + }, + }, + index: mockIndex, + maxDocCount: 1000, + indexPresent: true, + }, + { + name: "good probabilities with prefix", + searchResult: createSearchResult(goodProbabilities), + expectedOutput: samplemodel.ServiceOperationProbabilities{ + "new-srv": { + "op": 0.1, + }, + }, + index: mockIndex, + maxDocCount: 1000, + indexPresent: true, + indexPrefix: "foo", + }, + { + name: "bad probabilities", + searchResult: createSearchResult(`badJson{hello}world`), + expectedError: "unmarshalling documents failed: invalid character 'b' looking for beginning of value", + index: mockIndex, + indexPresent: true, + }, + { + name: "search fail", + searchError: errors.New("search failure"), + expectedError: "failed to search for Latest Probabilities: search failure", + index: mockIndex, + indexPresent: true, + }, + { + name: "index check fail", + indexError: errors.New("index check 
failure"), + expectedError: "failed to get latest indices: failed to check index existence: index check failure", + index: mockIndex, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + withEsSampling(test.indexPrefix, "2006-01-02", defaultMaxDocCount, func(w *samplingStorageTest) { + searchService := &mocks.SearchService{} + if test.indexPrefix != "" { + test.indexPrefix += "-" + } + index := test.indexPrefix + test.index + w.client.On("Search", index).Return(searchService) + searchService.On("Size", mock.Anything).Return(searchService) + searchService.On("IgnoreUnavailable", true).Return(searchService) + searchService.On("Do", mock.Anything).Return(test.searchResult, test.searchError) + + indicesexistsservice := &mocks.IndicesExistsService{} + w.client.On("IndexExists", index).Return(indicesexistsservice) + indicesexistsservice.On("Do", mock.Anything).Return(test.indexPresent, test.indexError) + + actual, err := w.storage.GetLatestProbabilities() + if test.expectedError != "" { + require.EqualError(t, err, test.expectedError) + assert.Nil(t, actual) + } else { + require.NoError(t, err) + assert.EqualValues(t, test.expectedOutput, actual) + } + }) + }) + } +} + +func createSearchResult(rawJsonStr string) *elastic.SearchResult { + rawJsonArr := []byte(rawJsonStr) + hits := make([]*elastic.SearchHit, 1) + hits[0] = &elastic.SearchHit{ + Source: (*json.RawMessage)(&rawJsonArr), + } + searchResult := &elastic.SearchResult{Hits: &elastic.SearchHits{Hits: hits}} + return searchResult +} + +func stringMatcher(q string) interface{} { + matchFunc := func(s string) bool { + return strings.Contains(s, q) + } + return mock.MatchedBy(matchFunc) +} + +func TestMain(m *testing.M) { + testutils.VerifyGoLeaks(m) +} diff --git a/plugin/storage/grpc/config/config.go b/plugin/storage/grpc/config/config.go index cd1f98ff917..c22e9f9d6f1 100644 --- a/plugin/storage/grpc/config/config.go +++ b/plugin/storage/grpc/config/config.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "os/exec" - "runtime" "time" "github.com/hashicorp/go-hclog" @@ -50,12 +49,21 @@ type Configuration struct { pluginHealthCheck *time.Ticker pluginHealthCheckDone chan bool pluginRPCClient plugin.ClientProtocol + remoteConn *grpc.ClientConn } // ClientPluginServices defines services plugin can expose and its capabilities type ClientPluginServices struct { shared.PluginServices - Capabilities shared.PluginCapabilities + Capabilities shared.PluginCapabilities + killPluginClient func() +} + +func (c *ClientPluginServices) Close() error { + if c.killPluginClient != nil { + c.killPluginClient() + } + return nil } // PluginBuilder is used to create storage plugins. Implemented by Configuration. @@ -78,6 +86,9 @@ func (c *Configuration) Close() error { c.pluginHealthCheck.Stop() c.pluginHealthCheckDone <- true } + if c.remoteConn != nil { + c.remoteConn.Close() + } return c.RemoteTLS.Close() } @@ -106,12 +117,14 @@ func (c *Configuration) buildRemote(logger *zap.Logger, tracerProvider trace.Tra opts = append(opts, grpc.WithUnaryInterceptor(tenancy.NewClientUnaryInterceptor(tenancyMgr))) opts = append(opts, grpc.WithStreamInterceptor(tenancy.NewClientStreamInterceptor(tenancyMgr))) } - conn, err := grpc.DialContext(ctx, c.RemoteServerAddr, opts...) + var err error + // TODO: Need to replace grpc.DialContext with grpc.NewClient and pass test + c.remoteConn, err = grpc.DialContext(ctx, c.RemoteServerAddr, opts...) 
if err != nil { return nil, fmt.Errorf("error connecting to remote storage: %w", err) } - grpcClient := shared.NewGRPCClient(conn) + grpcClient := shared.NewGRPCClient(c.remoteConn) return &ClientPluginServices{ PluginServices: shared.PluginServices{ Store: grpcClient, @@ -148,10 +161,6 @@ func (c *Configuration) buildPlugin(logger *zap.Logger, tracerProvider trace.Tra GRPCDialOptions: opts, }) - runtime.SetFinalizer(client, func(c *plugin.Client) { - c.Kill() - }) - rpcClient, err := client.Client() if err != nil { return nil, fmt.Errorf("error attempting to connect to plugin rpc client: %w", err) @@ -194,7 +203,8 @@ func (c *Configuration) buildPlugin(logger *zap.Logger, tracerProvider trace.Tra ArchiveStore: archiveStoragePlugin, StreamingSpanWriter: streamingSpanWriterPlugin, }, - Capabilities: capabilities, + Capabilities: capabilities, + killPluginClient: client.Kill, }, nil } diff --git a/plugin/storage/grpc/factory.go b/plugin/storage/grpc/factory.go index c4656c36ab4..0009ebcb6c8 100644 --- a/plugin/storage/grpc/factory.go +++ b/plugin/storage/grpc/factory.go @@ -15,6 +15,7 @@ package grpc import ( + "errors" "flag" "fmt" "io" @@ -53,6 +54,8 @@ type Factory struct { archiveStore shared.ArchiveStoragePlugin streamingSpanWriter shared.StreamingSpanWriterPlugin capabilities shared.PluginCapabilities + + servicesCloser io.Closer } // NewFactory creates a new Factory. @@ -60,6 +63,21 @@ func NewFactory() *Factory { return &Factory{} } +// NewFactoryWithConfig is used from jaeger(v2). +func NewFactoryWithConfig( + cfg config.Configuration, + metricsFactory metrics.Factory, + logger *zap.Logger, +) (*Factory, error) { + f := NewFactory() + f.InitFromOptions(Options{Configuration: cfg}) + err := f.Initialize(metricsFactory, logger) + if err != nil { + return nil, err + } + return f, nil +} + // AddFlags implements plugin.Configurable func (f *Factory) AddFlags(flagSet *flag.FlagSet) { f.options.AddFlags(flagSet) @@ -93,6 +111,7 @@ func (f *Factory) Initialize(metricsFactory metrics.Factory, logger *zap.Logger) f.archiveStore = services.ArchiveStore f.capabilities = services.Capabilities f.streamingSpanWriter = services.StreamingSpanWriter + f.servicesCloser = services logger.Info("External plugin storage configuration", zap.Any("configuration", f.options.Configuration)) return nil } @@ -149,5 +168,10 @@ func (f *Factory) CreateArchiveSpanWriter() (spanstore.Writer, error) { // Close closes the resources held by the factory func (f *Factory) Close() error { - return f.builder.Close() + errs := []error{} + if f.servicesCloser != nil { + errs = append(errs, f.servicesCloser.Close()) + } + errs = append(errs, f.builder.Close()) + return errors.Join(errs...) 
} diff --git a/plugin/storage/grpc/factory_test.go b/plugin/storage/grpc/factory_test.go index e9b4417383b..62699eb2e2a 100644 --- a/plugin/storage/grpc/factory_test.go +++ b/plugin/storage/grpc/factory_test.go @@ -16,13 +16,17 @@ package grpc import ( "errors" + "log" + "net" "testing" + "time" "github.com/spf13/viper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "google.golang.org/grpc" "github.com/jaegertracing/jaeger/pkg/config" "github.com/jaegertracing/jaeger/pkg/metrics" @@ -143,6 +147,29 @@ func TestGRPCStorageFactory(t *testing.T) { assert.Equal(t, f.store.DependencyReader(), depReader) } +func TestGRPCStorageFactoryWithConfig(t *testing.T) { + cfg := grpcConfig.Configuration{} + _, err := NewFactoryWithConfig(cfg, metrics.NullFactory, zap.NewNop()) + require.ErrorContains(t, err, "grpc-plugin builder failed to create a store: error connecting to remote storage") + + lis, err := net.Listen("tcp", ":0") + require.NoError(t, err, "failed to listen") + + s := grpc.NewServer() + go func() { + if err := s.Serve(lis); err != nil { + log.Fatalf("Server exited with error: %v", err) + } + }() + defer s.Stop() + + cfg.RemoteServerAddr = lis.Addr().String() + cfg.RemoteConnectTimeout = 1 * time.Second + f, err := NewFactoryWithConfig(cfg, metrics.NullFactory, zap.NewNop()) + require.NoError(t, err) + require.NoError(t, f.Close()) +} + func TestGRPCStorageFactory_Capabilities(t *testing.T) { f := NewFactory() v := viper.New() diff --git a/plugin/storage/grpc/options.go b/plugin/storage/grpc/options.go index 27c61786e7a..b3b8d9210b2 100644 --- a/plugin/storage/grpc/options.go +++ b/plugin/storage/grpc/options.go @@ -77,7 +77,7 @@ func (opt *Options) InitFromViper(v *viper.Viper) error { opt.Configuration.RemoteConnectTimeout = v.GetDuration(remoteConnectionTimeout) opt.Configuration.TenancyOpts = tenancy.InitFromViper(v) if opt.Configuration.PluginBinary != "" { - log.Printf(deprecatedSidecar + "using sidecar model of grpc-plugin storage, please upgrade to 'reomte' gRPC storage. https://github.com/jaegertracing/jaeger/issues/4647") + log.Printf(deprecatedSidecar + "using sidecar model of grpc-plugin storage, please upgrade to 'remote' gRPC storage. https://github.com/jaegertracing/jaeger/issues/4647") } return nil } diff --git a/plugin/storage/grpc/shared/grpc_handler_test.go b/plugin/storage/grpc/shared/grpc_handler_test.go index a6682264ef3..23d40c4534f 100644 --- a/plugin/storage/grpc/shared/grpc_handler_test.go +++ b/plugin/storage/grpc/shared/grpc_handler_test.go @@ -91,8 +91,10 @@ func withGRPCServer(fn func(r *grpcServerTest)) { streamWriter: streamWriter, } + handler := NewGRPCHandlerWithPlugins(impl, impl, impl) + defer handler.Close(context.Background(), &storage_v1.CloseWriterRequest{}) r := &grpcServerTest{ - server: NewGRPCHandlerWithPlugins(impl, impl, impl), + server: handler, impl: impl, } fn(r) diff --git a/plugin/storage/integration/badgerstore_test.go b/plugin/storage/integration/badgerstore_test.go index e1eba9cda14..17d2fdaa4d2 100644 --- a/plugin/storage/integration/badgerstore_test.go +++ b/plugin/storage/integration/badgerstore_test.go @@ -11,8 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build badger_storage_integration -// +build badger_storage_integration package integration @@ -33,59 +31,42 @@ type BadgerIntegrationStorage struct { factory *badger.Factory } -func (s *BadgerIntegrationStorage) initialize() error { +func (s *BadgerIntegrationStorage) initialize(t *testing.T) { s.factory = badger.NewFactory() + s.factory.Options.Primary.Ephemeral = false err := s.factory.Initialize(metrics.NullFactory, zap.NewNop()) - if err != nil { - return err - } - - sw, err := s.factory.CreateSpanWriter() - if err != nil { - return err - } - sr, err := s.factory.CreateSpanReader() - if err != nil { - return err - } - if s.SamplingStore, err = s.factory.CreateSamplingStore(0); err != nil { - return err - } - - s.SpanReader = sr - s.SpanWriter = sw - - s.Refresh = s.refresh - s.CleanUp = s.cleanUp + require.NoError(t, err) + t.Cleanup(func() { + s.factory.Close() + }) - logger, _ := testutils.NewLogger() - s.logger = logger + s.SpanWriter, err = s.factory.CreateSpanWriter() + require.NoError(t, err) - // TODO: remove this badger supports returning spanKind from GetOperations - s.GetOperationsMissingSpanKind = true - return nil -} + s.SpanReader, err = s.factory.CreateSpanReader() + require.NoError(t, err) -func (s *BadgerIntegrationStorage) clear() error { - return s.factory.Close() + s.SamplingStore, err = s.factory.CreateSamplingStore(0) + require.NoError(t, err) } -func (s *BadgerIntegrationStorage) cleanUp() error { - err := s.clear() - if err != nil { - return err - } - return s.initialize() -} - -func (s *BadgerIntegrationStorage) refresh() error { - return nil +func (s *BadgerIntegrationStorage) cleanUp(t *testing.T) { + s.factory.Purge() } func TestBadgerStorage(t *testing.T) { - s := &BadgerIntegrationStorage{} - require.NoError(t, s.initialize()) - s.IntegrationTestAll(t) - defer s.clear() + SkipUnlessEnv(t, "badger") + s := &BadgerIntegrationStorage{ + StorageIntegration: StorageIntegration{ + SkipArchiveTest: true, + + // TODO: remove this badger supports returning spanKind from GetOperations + GetOperationsMissingSpanKind: true, + }, + } + s.CleanUp = s.cleanUp + s.logger, _ = testutils.NewLogger() + s.initialize(t) + s.RunAll(t) } diff --git a/plugin/storage/integration/cassandra_test.go b/plugin/storage/integration/cassandra_test.go index facbb11a5f4..53da7167fab 100644 --- a/plugin/storage/integration/cassandra_test.go +++ b/plugin/storage/integration/cassandra_test.go @@ -16,9 +16,6 @@ package integration import ( - "errors" - "fmt" - "os" "testing" "github.com/stretchr/testify/require" @@ -32,8 +29,6 @@ import ( "github.com/jaegertracing/jaeger/storage/dependencystore" ) -var errInitializeCassandraDependencyWriter = errors.New("failed to initialize cassandra dependency writer") - type CassandraStorageIntegration struct { StorageIntegration @@ -45,8 +40,8 @@ func newCassandraStorageIntegration() *CassandraStorageIntegration { s := &CassandraStorageIntegration{ StorageIntegration: StorageIntegration{ GetDependenciesReturnsSource: true, + SkipArchiveTest: true, - Refresh: func() error { return nil }, SkipList: []string{ "Tags_+_Operation_name_+_Duration_range", "Tags_+_Duration_range", @@ -63,68 +58,61 @@ func newCassandraStorageIntegration() *CassandraStorageIntegration { return s } -func (s *CassandraStorageIntegration) cleanUp() error { - return s.session.Query("TRUNCATE traces").Exec() +func (s *CassandraStorageIntegration) cleanUp(t *testing.T) { + require.NoError(t, s.session.Query("TRUNCATE traces").Exec()) } -func (s *CassandraStorageIntegration) 
initializeCassandraFactory(flags []string) (*cassandra.Factory, error) { +func (s *CassandraStorageIntegration) initializeCassandraFactory(t *testing.T, flags []string) *cassandra.Factory { s.logger, _ = testutils.NewLogger() f := cassandra.NewFactory() v, command := config.Viperize(f.AddFlags) - if err := command.ParseFlags(flags); err != nil { - return nil, fmt.Errorf("unable to parse flags: %w", err) + { + err := command.ParseFlags(flags) + require.NoError(t, err) } f.InitFromViper(v, zap.NewNop()) - if err := f.Initialize(metrics.NullFactory, s.logger); err != nil { - return nil, err + { + err := f.Initialize(metrics.NullFactory, s.logger) + require.NoError(t, err) } - return f, nil + return f } -func (s *CassandraStorageIntegration) initializeCassandra() error { - f, err := s.initializeCassandraFactory([]string{ +func (s *CassandraStorageIntegration) initializeCassandra(t *testing.T) { + f := s.initializeCassandraFactory(t, []string{ "--cassandra.keyspace=jaeger_v1_dc1", }) - if err != nil { - return err - } s.session = f.PrimarySession() - if s.SpanWriter, err = f.CreateSpanWriter(); err != nil { - return err - } - if s.SpanReader, err = f.CreateSpanReader(); err != nil { - return err - } - if s.SamplingStore, err = f.CreateSamplingStore(0); err != nil { - return err - } - - if err = s.initializeDependencyReaderAndWriter(f); err != nil { - return err - } - return nil + var err error + s.SpanWriter, err = f.CreateSpanWriter() + require.NoError(t, err) + s.SpanReader, err = f.CreateSpanReader() + require.NoError(t, err) + s.SamplingStore, err = f.CreateSamplingStore(0) + require.NoError(t, err) + s.initializeDependencyReaderAndWriter(t, f) + t.Cleanup(func() { + require.NoError(t, f.Close()) + }) } -func (s *CassandraStorageIntegration) initializeDependencyReaderAndWriter(f *cassandra.Factory) error { +func (s *CassandraStorageIntegration) initializeDependencyReaderAndWriter(t *testing.T, f *cassandra.Factory) { var ( err error ok bool ) - if s.DependencyReader, err = f.CreateDependencyReader(); err != nil { - return err - } + s.DependencyReader, err = f.CreateDependencyReader() + require.NoError(t, err) + // TODO: Update this when the factory interface has CreateDependencyWriter if s.DependencyWriter, ok = s.DependencyReader.(dependencystore.Writer); !ok { - return errInitializeCassandraDependencyWriter + t.Log("DependencyWriter not implemented ") } - return nil } func TestCassandraStorage(t *testing.T) { - if os.Getenv("STORAGE") != "cassandra" { - t.Skip("Integration test against Cassandra skipped; set STORAGE env var to cassandra to run this") - } + SkipUnlessEnv(t, "cassandra") s := newCassandraStorageIntegration() - require.NoError(t, s.initializeCassandra()) - s.IntegrationTestAll(t) + s.initializeCassandra(t) + s.RunAll(t) } diff --git a/plugin/storage/integration/elasticsearch_test.go b/plugin/storage/integration/elasticsearch_test.go index 8a687be23fe..2f0e1cc2fe1 100644 --- a/plugin/storage/integration/elasticsearch_test.go +++ b/plugin/storage/integration/elasticsearch_test.go @@ -18,8 +18,8 @@ package integration import ( "context" "errors" + "fmt" "net/http" - "os" "strconv" "strings" "testing" @@ -29,19 +29,14 @@ import ( "github.com/olivere/elastic" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/sdk/trace/tracetest" - "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "go.uber.org/zap/zaptest" - "github.com/jaegertracing/jaeger/model" - estemplate 
"github.com/jaegertracing/jaeger/pkg/es" - eswrapper "github.com/jaegertracing/jaeger/pkg/es/wrapper" + "github.com/jaegertracing/jaeger/pkg/config" "github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/pkg/testutils" - "github.com/jaegertracing/jaeger/plugin/storage/es/dependencystore" - "github.com/jaegertracing/jaeger/plugin/storage/es/mappings" - "github.com/jaegertracing/jaeger/plugin/storage/es/spanstore" + "github.com/jaegertracing/jaeger/plugin/storage/es" + "github.com/jaegertracing/jaeger/storage/dependencystore" ) const ( @@ -57,29 +52,16 @@ const ( spanTemplateName = "jaeger-span" serviceTemplateName = "jaeger-service" dependenciesTemplateName = "jaeger-dependencies" + primaryNamespace = "es" + archiveNamespace = "es-archive" ) type ESStorageIntegration struct { StorageIntegration - client *elastic.Client - v8Client *elasticsearch8.Client - bulkProcessor *elastic.BulkProcessor - logger *zap.Logger -} - -func (s *ESStorageIntegration) tracerProvider() (trace.TracerProvider, *tracetest.InMemoryExporter, func()) { - exporter := tracetest.NewInMemoryExporter() - tp := sdktrace.NewTracerProvider( - sdktrace.WithSampler(sdktrace.AlwaysSample()), - sdktrace.WithSyncer(exporter), - ) - closer := func() { - if err := tp.Shutdown(context.Background()); err != nil { - s.logger.Error("failed to close tracer", zap.Error(err)) - } - } - return tp, exporter, closer + client *elastic.Client + v8Client *elasticsearch8.Client + logger *zap.Logger } func (s *ESStorageIntegration) getVersion() (uint, error) { @@ -100,13 +82,11 @@ func (s *ESStorageIntegration) getVersion() (uint, error) { return uint(esVersion), nil } -func (s *ESStorageIntegration) initializeES(allTagsAsFields, archive bool) error { +func (s *ESStorageIntegration) initializeES(t *testing.T, allTagsAsFields bool) { rawClient, err := elastic.NewClient( elastic.SetURL(queryURL), elastic.SetSniff(false)) - if err != nil { - return err - } + require.NoError(t, err) s.logger, _ = testutils.NewLogger() s.client = rawClient @@ -114,110 +94,72 @@ func (s *ESStorageIntegration) initializeES(allTagsAsFields, archive bool) error Addresses: []string{queryURL}, DiscoverNodesOnStart: false, }) - if err != nil { - return err - } + require.NoError(t, err) - if err := s.initSpanstore(allTagsAsFields, archive); err != nil { - return err - } - s.CleanUp = func() error { - return s.esCleanUp(allTagsAsFields, archive) + s.initSpanstore(t, allTagsAsFields) + + s.CleanUp = func(t *testing.T) { + s.esCleanUp(t, allTagsAsFields) } - s.Refresh = s.esRefresh - s.esCleanUp(allTagsAsFields, archive) - // TODO: remove this flag after ES support returning spanKind when get operations + s.esCleanUp(t, allTagsAsFields) + s.SkipArchiveTest = false + // TODO: remove this flag after ES supports returning spanKind + // Issue https://github.com/jaegertracing/jaeger/issues/1923 s.GetOperationsMissingSpanKind = true - return nil } -func (s *ESStorageIntegration) esCleanUp(allTagsAsFields, archive bool) error { +func (s *ESStorageIntegration) esCleanUp(t *testing.T, allTagsAsFields bool) { _, err := s.client.DeleteIndex("*").Do(context.Background()) - if err != nil { - return err - } - return s.initSpanstore(allTagsAsFields, archive) + require.NoError(t, err) + s.initSpanstore(t, allTagsAsFields) } -func (s *ESStorageIntegration) initSpanstore(allTagsAsFields, archive bool) error { - bp, _ := s.client.BulkProcessor().BulkActions(1).FlushInterval(time.Nanosecond).Do(context.Background()) - s.bulkProcessor = bp - esVersion, err := s.getVersion() - 
if err != nil { - return err - } - client := eswrapper.WrapESClient(s.client, bp, esVersion, s.v8Client) - mappingBuilder := mappings.MappingBuilder{ - TemplateBuilder: estemplate.TextTemplateBuilder{}, - Shards: 5, - Replicas: 1, - EsVersion: client.GetVersion(), - IndexPrefix: indexPrefix, - UseILM: false, - } - spanMapping, serviceMapping, err := mappingBuilder.GetSpanServiceMappings() - if err != nil { - return err - } - - clientFn := func() estemplate.Client { return client } +func (s *ESStorageIntegration) initializeESFactory(t *testing.T, allTagsAsFields bool) *es.Factory { + s.logger = zaptest.NewLogger(t) + f := es.NewFactory() + v, command := config.Viperize(f.AddFlags) + args := []string{ + fmt.Sprintf("--es.num-shards=%v", 5), + fmt.Sprintf("--es.num-replicas=%v", 1), + fmt.Sprintf("--es.index-prefix=%v", indexPrefix), + fmt.Sprintf("--es.use-ilm=%v", false), + fmt.Sprintf("--es.tags-as-fields.all=%v", allTagsAsFields), + fmt.Sprintf("--es.bulk.actions=%v", 1), + fmt.Sprintf("--es.bulk.flush-interval=%v", time.Nanosecond), + "--es-archive.enabled=true", + fmt.Sprintf("--es-archive.tags-as-fields.all=%v", allTagsAsFields), + fmt.Sprintf("--es-archive.index-prefix=%v", indexPrefix), + } + require.NoError(t, command.ParseFlags(args)) + f.InitFromViper(v, s.logger) + require.NoError(t, f.Initialize(metrics.NullFactory, s.logger)) + + // TODO ideally we need to close the factory once the test is finished + // but because esCleanup calls initialize() we get a panic later + // t.Cleanup(func() { + // require.NoError(t, f.Close()) + // }) + return f +} - w := spanstore.NewSpanWriter( - spanstore.SpanWriterParams{ - Client: clientFn, - Logger: s.logger, - MetricsFactory: metrics.NullFactory, - IndexPrefix: indexPrefix, - AllTagsAsFields: allTagsAsFields, - TagDotReplacement: tagKeyDeDotChar, - Archive: archive, - }) - err = w.CreateTemplates(spanMapping, serviceMapping, indexPrefix) - if err != nil { - return err - } - tracer, _, closer := s.tracerProvider() - defer closer() - s.SpanWriter = w - s.SpanReader = spanstore.NewSpanReader(spanstore.SpanReaderParams{ - Client: clientFn, - Logger: s.logger, - MetricsFactory: metrics.NullFactory, - IndexPrefix: indexPrefix, - MaxSpanAge: maxSpanAge, - TagDotReplacement: tagKeyDeDotChar, - Archive: archive, - MaxDocCount: defaultMaxDocCount, - Tracer: tracer.Tracer("test"), - }) - dependencyStore := dependencystore.NewDependencyStore(dependencystore.DependencyStoreParams{ - Client: clientFn, - Logger: s.logger, - IndexPrefix: indexPrefix, - IndexDateLayout: indexDateLayout, - MaxDocCount: defaultMaxDocCount, - }) +func (s *ESStorageIntegration) initSpanstore(t *testing.T, allTagsAsFields bool) { + f := s.initializeESFactory(t, allTagsAsFields) + var err error + s.SpanWriter, err = f.CreateSpanWriter() + require.NoError(t, err) + s.SpanReader, err = f.CreateSpanReader() + require.NoError(t, err) + s.ArchiveSpanReader, err = f.CreateArchiveSpanReader() + require.NoError(t, err) + s.ArchiveSpanWriter, err = f.CreateArchiveSpanWriter() + require.NoError(t, err) - depMapping, err := mappingBuilder.GetDependenciesMappings() - if err != nil { - return err - } - err = dependencyStore.CreateTemplates(depMapping) - if err != nil { - return err - } - s.DependencyReader = dependencyStore - s.DependencyWriter = dependencyStore - return nil -} + s.DependencyReader, err = f.CreateDependencyReader() + require.NoError(t, err) + s.DependencyWriter = s.DependencyReader.(dependencystore.Writer) -func (s *ESStorageIntegration) esRefresh() error { - err := 
s.bulkProcessor.Flush() - if err != nil { - return err - } - _, err = s.client.Refresh().Do(context.Background()) - return err + s.SamplingStore, err = f.CreateSamplingStore(1) + require.NoError(t, err) } func healthCheck() error { @@ -230,46 +172,34 @@ func healthCheck() error { return errors.New("elastic search is not ready") } -func testElasticsearchStorage(t *testing.T, allTagsAsFields, archive bool) { - if os.Getenv("STORAGE") != "elasticsearch" && os.Getenv("STORAGE") != "opensearch" { - t.Skip("Integration test against ElasticSearch skipped; set STORAGE env var to elasticsearch to run this") - } +func testElasticsearchStorage(t *testing.T, allTagsAsFields bool) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") if err := healthCheck(); err != nil { t.Fatal(err) } s := &ESStorageIntegration{} - require.NoError(t, s.initializeES(allTagsAsFields, archive)) + s.initializeES(t, allTagsAsFields) s.Fixtures = LoadAndParseQueryTestCases(t, "fixtures/queries_es.json") - if archive { - t.Run("ArchiveTrace", s.testArchiveTrace) - } else { - s.IntegrationTestAll(t) - } + s.RunAll(t) } func TestElasticsearchStorage(t *testing.T) { - testElasticsearchStorage(t, false, false) + testElasticsearchStorage(t, false) } func TestElasticsearchStorage_AllTagsAsObjectFields(t *testing.T) { - testElasticsearchStorage(t, true, false) -} - -func TestElasticsearchStorage_Archive(t *testing.T) { - testElasticsearchStorage(t, false, true) + testElasticsearchStorage(t, true) } func TestElasticsearchStorage_IndexTemplates(t *testing.T) { - if os.Getenv("STORAGE") != "elasticsearch" { - t.Skip("Integration test against ElasticSearch skipped; set STORAGE env var to elasticsearch to run this") - } + SkipUnlessEnv(t, "elasticsearch", "opensearch") if err := healthCheck(); err != nil { t.Fatal(err) } s := &ESStorageIntegration{} - require.NoError(t, s.initializeES(true, false)) + s.initializeES(t, true) esVersion, err := s.getVersion() require.NoError(t, err) // TODO abstract this into pkg/es/client.IndexManagementLifecycleAPI @@ -288,34 +218,7 @@ func TestElasticsearchStorage_IndexTemplates(t *testing.T) { require.NoError(t, err) assert.Equal(t, 200, spanTemplateExistsResponse.StatusCode) } - err = s.cleanESIndexTemplates(t, indexPrefix) - require.NoError(t, err) -} - -func (s *ESStorageIntegration) testArchiveTrace(t *testing.T) { - defer s.cleanUp(t) - tID := model.NewTraceID(uint64(11), uint64(22)) - expected := &model.Span{ - OperationName: "archive_span", - StartTime: time.Now().Add(-maxSpanAge * 5), - TraceID: tID, - SpanID: model.NewSpanID(55), - References: []model.SpanRef{}, - Process: model.NewProcess("archived_service", model.KeyValues{}), - } - - require.NoError(t, s.SpanWriter.WriteSpan(context.Background(), expected)) - s.refresh(t) - - var actual *model.Trace - found := s.waitForCondition(t, func(t *testing.T) bool { - var err error - actual, err = s.SpanReader.GetTrace(context.Background(), tID) - return err == nil && len(actual.Spans) == 1 - }) - if !assert.True(t, found) { - CompareTraces(t, &model.Trace{Spans: []*model.Span{expected}}, actual) - } + s.cleanESIndexTemplates(t, indexPrefix) } func (s *ESStorageIntegration) cleanESIndexTemplates(t *testing.T, prefix string) error { diff --git a/plugin/storage/integration/es_index_cleaner_test.go b/plugin/storage/integration/es_index_cleaner_test.go index 6805c99a684..fbc63bc94c9 100644 --- a/plugin/storage/integration/es_index_cleaner_test.go +++ b/plugin/storage/integration/es_index_cleaner_test.go @@ -12,9 +12,6 @@ // See the License for the 
specific language governing permissions and // limitations under the License. -//go:build index_cleaner -// +build index_cleaner - package integration import ( @@ -34,6 +31,7 @@ import ( const ( archiveIndexName = "jaeger-span-archive" dependenciesIndexName = "jaeger-dependencies-2019-01-01" + samplingIndexName = "jaeger-sampling-2019-01-01" spanIndexName = "jaeger-span-2019-01-01" serviceIndexName = "jaeger-service-2019-01-01" indexCleanerImage = "jaegertracing/jaeger-es-index-cleaner:latest" @@ -42,6 +40,7 @@ const ( ) func TestIndexCleaner_doNotFailOnEmptyStorage(t *testing.T) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") client, err := createESClient() require.NoError(t, err) _, err = client.DeleteIndex("*").Do(context.Background()) @@ -61,6 +60,7 @@ func TestIndexCleaner_doNotFailOnEmptyStorage(t *testing.T) { } func TestIndexCleaner_doNotFailOnFullStorage(t *testing.T) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") client, err := createESClient() require.NoError(t, err) tests := []struct { @@ -73,7 +73,8 @@ func TestIndexCleaner_doNotFailOnFullStorage(t *testing.T) { for _, test := range tests { _, err = client.DeleteIndex("*").Do(context.Background()) require.NoError(t, err) - err := createAllIndices(client, "") + // Create Indices with adaptive sampling disabled (set to false). + err := createAllIndices(client, "", false) require.NoError(t, err) err = runEsCleaner(1500, test.envs) require.NoError(t, err) @@ -81,15 +82,17 @@ func TestIndexCleaner_doNotFailOnFullStorage(t *testing.T) { } func TestIndexCleaner(t *testing.T) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") client, err := createESClient() require.NoError(t, err) v8Client, err := createESV8Client() require.NoError(t, err) tests := []struct { - name string - envVars []string - expectedIndices []string + name string + envVars []string + expectedIndices []string + adaptiveSampling bool }{ { name: "RemoveDailyIndices", @@ -99,49 +102,62 @@ func TestIndexCleaner(t *testing.T) { "jaeger-span-000001", "jaeger-service-000001", "jaeger-dependencies-000001", "jaeger-span-000002", "jaeger-service-000002", "jaeger-dependencies-000002", "jaeger-span-archive-000001", "jaeger-span-archive-000002", }, + adaptiveSampling: false, }, { name: "RemoveRolloverIndices", envVars: []string{"ROLLOVER=true"}, expectedIndices: []string{ - archiveIndexName, spanIndexName, serviceIndexName, dependenciesIndexName, + archiveIndexName, spanIndexName, serviceIndexName, dependenciesIndexName, samplingIndexName, "jaeger-span-000002", "jaeger-service-000002", "jaeger-dependencies-000002", "jaeger-span-archive-000001", "jaeger-span-archive-000002", }, + adaptiveSampling: false, }, { name: "RemoveArchiveIndices", envVars: []string{"ARCHIVE=true"}, expectedIndices: []string{ - archiveIndexName, spanIndexName, serviceIndexName, dependenciesIndexName, + archiveIndexName, spanIndexName, serviceIndexName, dependenciesIndexName, samplingIndexName, "jaeger-span-000001", "jaeger-service-000001", "jaeger-dependencies-000001", "jaeger-span-000002", "jaeger-service-000002", "jaeger-dependencies-000002", "jaeger-span-archive-000002", }, + adaptiveSampling: false, + }, + { + name: "RemoveDailyIndices with adaptiveSampling", + envVars: []string{}, + expectedIndices: []string{ + archiveIndexName, + "jaeger-span-000001", "jaeger-service-000001", "jaeger-dependencies-000001", "jaeger-span-000002", "jaeger-service-000002", "jaeger-dependencies-000002", + "jaeger-span-archive-000001", "jaeger-span-archive-000002", "jaeger-sampling-000001", 
"jaeger-sampling-000002", + }, + adaptiveSampling: true, }, } for _, test := range tests { t.Run(fmt.Sprintf("%s_no_prefix, %s", test.name, test.envVars), func(t *testing.T) { - runIndexCleanerTest(t, client, v8Client, "", test.expectedIndices, test.envVars) + runIndexCleanerTest(t, client, v8Client, "", test.expectedIndices, test.envVars, test.adaptiveSampling) }) t.Run(fmt.Sprintf("%s_prefix, %s", test.name, test.envVars), func(t *testing.T) { - runIndexCleanerTest(t, client, v8Client, indexPrefix, test.expectedIndices, append(test.envVars, "INDEX_PREFIX="+indexPrefix)) + runIndexCleanerTest(t, client, v8Client, indexPrefix, test.expectedIndices, append(test.envVars, "INDEX_PREFIX="+indexPrefix), test.adaptiveSampling) }) } } -func runIndexCleanerTest(t *testing.T, client *elastic.Client, v8Client *elasticsearch8.Client, prefix string, expectedIndices, envVars []string) { +func runIndexCleanerTest(t *testing.T, client *elastic.Client, v8Client *elasticsearch8.Client, prefix string, expectedIndices, envVars []string, adaptiveSampling bool) { // make sure ES is clean _, err := client.DeleteIndex("*").Do(context.Background()) require.NoError(t, err) defer cleanESIndexTemplates(t, client, v8Client, prefix) - err = createAllIndices(client, prefix) + err = createAllIndices(client, prefix, adaptiveSampling) require.NoError(t, err) err = runEsCleaner(0, envVars) require.NoError(t, err) indices, err := client.IndexNames() require.NoError(t, err) if prefix != "" { - prefix = prefix + "-" + prefix += "-" } var expected []string for _, index := range expectedIndices { @@ -150,34 +166,37 @@ func runIndexCleanerTest(t *testing.T, client *elastic.Client, v8Client *elastic assert.ElementsMatch(t, indices, expected, fmt.Sprintf("indices found: %v, expected: %v", indices, expected)) } -func createAllIndices(client *elastic.Client, prefix string) error { +func createAllIndices(client *elastic.Client, prefix string, adaptiveSampling bool) error { prefixWithSeparator := prefix if prefix != "" { - prefixWithSeparator = prefixWithSeparator + "-" + prefixWithSeparator += "-" } // create daily indices and archive index err := createEsIndices(client, []string{ - prefixWithSeparator + spanIndexName, prefixWithSeparator + serviceIndexName, - prefixWithSeparator + dependenciesIndexName, prefixWithSeparator + archiveIndexName, + prefixWithSeparator + spanIndexName, + prefixWithSeparator + serviceIndexName, + prefixWithSeparator + dependenciesIndexName, + prefixWithSeparator + samplingIndexName, + prefixWithSeparator + archiveIndexName, }) if err != nil { return err } // create rollover archive index and roll alias to the new index - err = runEsRollover("init", []string{"ARCHIVE=true", "INDEX_PREFIX=" + prefix}) + err = runEsRollover("init", []string{"ARCHIVE=true", "INDEX_PREFIX=" + prefix}, adaptiveSampling) if err != nil { return err } - err = runEsRollover("rollover", []string{"ARCHIVE=true", "INDEX_PREFIX=" + prefix, rolloverNowEnvVar}) + err = runEsRollover("rollover", []string{"ARCHIVE=true", "INDEX_PREFIX=" + prefix, rolloverNowEnvVar}, adaptiveSampling) if err != nil { return err } // create rollover main indices and roll over to the new index - err = runEsRollover("init", []string{"ARCHIVE=false", "INDEX_PREFIX=" + prefix}) + err = runEsRollover("init", []string{"ARCHIVE=false", "INDEX_PREFIX=" + prefix}, adaptiveSampling) if err != nil { return err } - err = runEsRollover("rollover", []string{"ARCHIVE=false", "INDEX_PREFIX=" + prefix, rolloverNowEnvVar}) + err = runEsRollover("rollover", 
[]string{"ARCHIVE=false", "INDEX_PREFIX=" + prefix, rolloverNowEnvVar}, adaptiveSampling) if err != nil { return err } @@ -205,12 +224,12 @@ func runEsCleaner(days int, envs []string) error { return err } -func runEsRollover(action string, envs []string) error { +func runEsRollover(action string, envs []string, adaptiveSampling bool) error { var dockerEnv string for _, e := range envs { dockerEnv += fmt.Sprintf(" -e %s", e) } - args := fmt.Sprintf("docker run %s --rm --net=host %s %s http://%s", dockerEnv, rolloverImage, action, queryHostPort) + args := fmt.Sprintf("docker run %s --rm --net=host %s %s --adaptive-sampling=%t http://%s", dockerEnv, rolloverImage, action, adaptiveSampling, queryHostPort) cmd := exec.Command("/bin/sh", "-c", args) out, err := cmd.CombinedOutput() fmt.Println(string(out)) diff --git a/plugin/storage/integration/es_index_rollover_test.go b/plugin/storage/integration/es_index_rollover_test.go index a3f1ef99f16..45d874c4475 100644 --- a/plugin/storage/integration/es_index_rollover_test.go +++ b/plugin/storage/integration/es_index_rollover_test.go @@ -12,32 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build index_rollover -// +build index_rollover - package integration import ( "context" "fmt" - "os/exec" "strconv" "testing" - elasticsearch8 "github.com/elastic/go-elasticsearch/v8" "github.com/olivere/elastic" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/jaegertracing/jaeger/pkg/testutils" ) const ( defaultILMPolicyName = "jaeger-ilm-policy" - rolloverImage = "jaegertracing/jaeger-es-rollover:latest" ) func TestIndexRollover_FailIfILMNotPresent(t *testing.T) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") client, err := createESClient() require.NoError(t, err) esVersion, err := getVersion(client) @@ -48,7 +41,8 @@ func TestIndexRollover_FailIfILMNotPresent(t *testing.T) { // make sure ES is clean cleanES(t, client, defaultILMPolicyName) envVars := []string{"ES_USE_ILM=true"} - err = runEsRollover("init", envVars) + // Run the ES rollover test with adaptive sampling disabled (set to false). + err = runEsRollover("init", envVars, false) require.EqualError(t, err, "exit status 1") indices, err := client.IndexNames() require.NoError(t, err) @@ -56,13 +50,14 @@ func TestIndexRollover_FailIfILMNotPresent(t *testing.T) { } func TestIndexRollover_CreateIndicesWithILM(t *testing.T) { + SkipUnlessEnv(t, "elasticsearch", "opensearch") // Test using the default ILM Policy Name, i.e. do not pass the ES_ILM_POLICY_NAME env var to the rollover script. - t.Run(fmt.Sprintf("DefaultPolicyName"), func(t *testing.T) { + t.Run("DefaultPolicyName", func(t *testing.T) { runCreateIndicesWithILM(t, defaultILMPolicyName) }) // Test using a configured ILM Policy Name, i.e. pass the ES_ILM_POLICY_NAME env var to the rollover script. - t.Run(fmt.Sprintf("SetPolicyName"), func(t *testing.T) { + t.Run("SetPolicyName", func(t *testing.T) { runCreateIndicesWithILM(t, "jaeger-test-policy") }) } @@ -84,7 +79,8 @@ func runCreateIndicesWithILM(t *testing.T, ilmPolicyName string) { if esVersion < 7 { cleanES(t, client, "") - err := runEsRollover("init", envVars) + // Run the ES rollover test with adaptive sampling disabled (set to false). 
+ err := runEsRollover("init", envVars, false) require.EqualError(t, err, "exit status 1") indices, err1 := client.IndexNames() require.NoError(t, err1) @@ -92,18 +88,24 @@ func runCreateIndicesWithILM(t *testing.T, ilmPolicyName string) { } else { expectedIndices := []string{"jaeger-span-000001", "jaeger-service-000001", "jaeger-dependencies-000001"} - t.Run(fmt.Sprintf("NoPrefix"), func(t *testing.T) { - runIndexRolloverWithILMTest(t, client, "", expectedIndices, envVars, ilmPolicyName) + t.Run("NoPrefix", func(t *testing.T) { + runIndexRolloverWithILMTest(t, client, "", expectedIndices, envVars, ilmPolicyName, false) + }) + t.Run("WithPrefix", func(t *testing.T) { + runIndexRolloverWithILMTest(t, client, indexPrefix, expectedIndices, append(envVars, "INDEX_PREFIX="+indexPrefix), ilmPolicyName, false) }) - t.Run(fmt.Sprintf("WithPrefix"), func(t *testing.T) { - runIndexRolloverWithILMTest(t, client, indexPrefix, expectedIndices, append(envVars, "INDEX_PREFIX="+indexPrefix), ilmPolicyName) + t.Run("WithAdaptiveSampling", func(t *testing.T) { + runIndexRolloverWithILMTest(t, client, indexPrefix, expectedIndices, append(envVars, "INDEX_PREFIX="+indexPrefix), ilmPolicyName, true) }) } } -func runIndexRolloverWithILMTest(t *testing.T, client *elastic.Client, prefix string, expectedIndices, envVars []string, ilmPolicyName string) { +func runIndexRolloverWithILMTest(t *testing.T, client *elastic.Client, prefix string, expectedIndices, envVars []string, ilmPolicyName string, adaptiveSampling bool) { writeAliases := []string{"jaeger-service-write", "jaeger-span-write", "jaeger-dependencies-write"} - + if adaptiveSampling { + writeAliases = append(writeAliases, "jaeger-sampling-write") + expectedIndices = append(expectedIndices, "jaeger-sampling-000001") + } // make sure ES is cleaned before test cleanES(t, client, ilmPolicyName) v8Client, err := createESV8Client() @@ -115,7 +117,7 @@ func runIndexRolloverWithILMTest(t *testing.T, client *elastic.Client, prefix st require.NoError(t, err) if prefix != "" { - prefix = prefix + "-" + prefix += "-" } var expected, expectedWriteAliases, actualWriteAliases []string for _, index := range expectedIndices { @@ -126,7 +128,7 @@ func runIndexRolloverWithILMTest(t *testing.T, client *elastic.Client, prefix st } // Run rollover with given EnvVars - err = runEsRollover("init", envVars) + err = runEsRollover("init", envVars, adaptiveSampling) require.NoError(t, err) indices, err := client.IndexNames() @@ -146,31 +148,6 @@ func runIndexRolloverWithILMTest(t *testing.T, client *elastic.Client, prefix st assert.ElementsMatch(t, actualWriteAliases, expectedWriteAliases, fmt.Sprintf("aliases found: %v, expected: %v", actualWriteAliases, expectedWriteAliases)) } -func createESClient() (*elastic.Client, error) { - return elastic.NewClient( - elastic.SetURL(queryURL), - elastic.SetSniff(false)) -} - -func createESV8Client() (*elasticsearch8.Client, error) { - return elasticsearch8.NewClient(elasticsearch8.Config{ - Addresses: []string{queryURL}, - DiscoverNodesOnStart: false, - }) -} - -func runEsRollover(action string, envs []string) error { - var dockerEnv string - for _, e := range envs { - dockerEnv += fmt.Sprintf(" -e %s", e) - } - args := fmt.Sprintf("docker run %s --rm --net=host %s %s http://%s", dockerEnv, rolloverImage, action, queryHostPort) - cmd := exec.Command("/bin/sh", "-c", args) - out, err := cmd.CombinedOutput() - fmt.Println(string(out)) - return err -} - func getVersion(client *elastic.Client) (uint, error) { pingResult, _, err := 
client.Ping(queryURL).Do(context.Background()) if err != nil { @@ -202,12 +179,3 @@ func cleanES(t *testing.T, client *elastic.Client, policyName string) { _, err = client.IndexDeleteTemplate("*").Do(context.Background()) require.NoError(t, err) } - -func cleanESIndexTemplates(t *testing.T, client *elastic.Client, v8Client *elasticsearch8.Client, prefix string) { - s := &ESStorageIntegration{ - client: client, - v8Client: v8Client, - } - s.logger, _ = testutils.NewLogger() - s.cleanESIndexTemplates(t, prefix) -} diff --git a/plugin/storage/integration/grpc_test.go b/plugin/storage/integration/grpc_test.go index 5b6504561cb..357088a7ccd 100644 --- a/plugin/storage/integration/grpc_test.go +++ b/plugin/storage/integration/grpc_test.go @@ -13,29 +13,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build grpc_storage_integration -// +build grpc_storage_integration - package integration import ( - "net" "os" "path" - "sync" "testing" "github.com/stretchr/testify/require" "go.uber.org/zap" - googleGRPC "google.golang.org/grpc" "github.com/jaegertracing/jaeger/pkg/config" "github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/pkg/testutils" "github.com/jaegertracing/jaeger/plugin/storage/grpc" - grpcMemory "github.com/jaegertracing/jaeger/plugin/storage/grpc/memory" - "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" - "github.com/jaegertracing/jaeger/plugin/storage/memory" ) const ( @@ -43,104 +34,55 @@ const ( streamingPluginConfigPath = "fixtures/grpc_plugin_conf.yaml" ) -type gRPCServer struct { - errChan chan error - server *googleGRPC.Server - wg sync.WaitGroup -} - -func newgRPCServer() (*gRPCServer, error) { - return &gRPCServer{errChan: make(chan error, 1)}, nil -} - -func (s *gRPCServer) Restart() error { - // stop the server if one already exists - if s.server != nil { - s.server.GracefulStop() - s.wg.Wait() - select { - case err := <-s.errChan: - return err - default: - } - } - - memStorePlugin := grpcMemory.NewStoragePlugin(memory.NewStore(), memory.NewStore()) - - s.server = googleGRPC.NewServer() - queryPlugin := shared.StorageGRPCPlugin{ - Impl: memStorePlugin, - ArchiveImpl: memStorePlugin, - } - - if err := queryPlugin.RegisterHandlers(s.server); err != nil { - return err - } - - listener, err := net.Listen("tcp", "localhost:2001") - if err != nil { - return err - } - s.wg.Add(1) - go func() { - defer s.wg.Done() - if err = s.server.Serve(listener); err != nil { - select { - case s.errChan <- err: - default: - } - } - }() - return nil -} - type GRPCStorageIntegrationTestSuite struct { StorageIntegration - logger *zap.Logger - flags []string - server *gRPCServer + logger *zap.Logger + flags []string + factory *grpc.Factory + useRemoteStorage bool + remoteStorage *RemoteMemoryStorage } -func (s *GRPCStorageIntegrationTestSuite) initialize() error { +func (s *GRPCStorageIntegrationTestSuite) initialize(t *testing.T) { s.logger, _ = testutils.NewLogger() - if s.server != nil { - if err := s.server.Restart(); err != nil { - return err - } + if s.useRemoteStorage { + s.remoteStorage = StartNewRemoteMemoryStorage(t, s.logger) } f := grpc.NewFactory() v, command := config.Viperize(f.AddFlags) err := command.ParseFlags(s.flags) - if err != nil { - return err - } + require.NoError(t, err) f.InitFromViper(v, zap.NewNop()) - if err := f.Initialize(metrics.NullFactory, s.logger); err != nil { - return err - } + err = f.Initialize(metrics.NullFactory, s.logger) + require.NoError(t, err) + 
s.factory = f - if s.SpanWriter, err = f.CreateSpanWriter(); err != nil { - return err - } - if s.SpanReader, err = f.CreateSpanReader(); err != nil { - return err - } + s.SpanWriter, err = f.CreateSpanWriter() + require.NoError(t, err) + s.SpanReader, err = f.CreateSpanReader() + require.NoError(t, err) + s.ArchiveSpanReader, err = f.CreateArchiveSpanReader() + require.NoError(t, err) + s.ArchiveSpanWriter, err = f.CreateArchiveSpanWriter() + require.NoError(t, err) // TODO DependencyWriter is not implemented in grpc store - s.Refresh = s.refresh s.CleanUp = s.cleanUp - return nil } -func (s *GRPCStorageIntegrationTestSuite) refresh() error { - return nil +func (s *GRPCStorageIntegrationTestSuite) close(t *testing.T) { + require.NoError(t, s.factory.Close()) + if s.useRemoteStorage { + s.remoteStorage.Close(t) + } } -func (s *GRPCStorageIntegrationTestSuite) cleanUp() error { - return s.initialize() +func (s *GRPCStorageIntegrationTestSuite) cleanUp(t *testing.T) { + s.close(t) + s.initialize(t) } func getPluginFlags(t *testing.T) []string { @@ -157,6 +99,7 @@ func getPluginFlags(t *testing.T) []string { } func TestGRPCStorage(t *testing.T) { + SkipUnlessEnv(t, "grpc") flags := getPluginFlags(t) if configPath := os.Getenv("PLUGIN_CONFIG_PATH"); configPath == "" { t.Log("PLUGIN_CONFIG_PATH env var not set") @@ -167,11 +110,13 @@ func TestGRPCStorage(t *testing.T) { s := &GRPCStorageIntegrationTestSuite{ flags: flags, } - require.NoError(t, s.initialize()) - s.IntegrationTestAll(t) + s.initialize(t) + defer s.close(t) + s.RunAll(t) } func TestGRPCStreamingWriter(t *testing.T) { + SkipUnlessEnv(t, "grpc") flags := getPluginFlags(t) wd, err := os.Getwd() require.NoError(t, err) @@ -182,22 +127,23 @@ func TestGRPCStreamingWriter(t *testing.T) { s := &GRPCStorageIntegrationTestSuite{ flags: flags, } - require.NoError(t, s.initialize()) - s.IntegrationTestAll(t) + s.initialize(t) + defer s.close(t) + s.RunAll(t) } func TestGRPCRemoteStorage(t *testing.T) { + SkipUnlessEnv(t, "grpc") flags := []string{ - "--grpc-storage.server=localhost:2001", + "--grpc-storage.server=localhost:17271", "--grpc-storage.tls.enabled=false", } - server, err := newgRPCServer() - require.NoError(t, err) s := &GRPCStorageIntegrationTestSuite{ - flags: flags, - server: server, + flags: flags, + useRemoteStorage: true, } - require.NoError(t, s.initialize()) - s.IntegrationTestAll(t) + s.initialize(t) + defer s.close(t) + s.RunAll(t) } diff --git a/plugin/storage/integration/integration.go b/plugin/storage/integration/integration.go index 52170a1fb60..b8654e13a1e 100644 --- a/plugin/storage/integration/integration.go +++ b/plugin/storage/integration/integration.go @@ -21,6 +21,7 @@ import ( "embed" "encoding/json" "fmt" + "os" "regexp" "sort" "strings" @@ -46,31 +47,44 @@ const ( //go:embed fixtures var fixtures embed.FS -// StorageIntegration holds components for storage integration test +// StorageIntegration holds components for storage integration test. +// The intended usage is as follows: +// - a specific storage implementation declares its own test functions +// - in those functions it instantiates and populates this struct +// - it then calls RunAll. +// +// Some implementations may declare multiple tests, with different settings, +// and call RunAll() under different conditions.
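+//
+// For illustration only: a hypothetical backend-specific test (the "mystorage" env value
+// and MyStorageIntegrationTestSuite with its initialize/close helpers are made-up names,
+// modeled on the gRPC suite in this change) might look like:
+//
+//	func TestMyStorage(t *testing.T) {
+//	    SkipUnlessEnv(t, "mystorage")
+//	    s := &MyStorageIntegrationTestSuite{}
+//	    s.initialize(t)
+//	    defer s.close(t)
+//	    s.RunAll(t)
+//	}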
type StorageIntegration struct { - SpanWriter spanstore.Writer - SpanReader spanstore.Reader - DependencyWriter dependencystore.Writer - DependencyReader dependencystore.Reader - SamplingStore samplingstore.Store - Fixtures []*QueryFixtures + SpanWriter spanstore.Writer + SpanReader spanstore.Reader + ArchiveSpanReader spanstore.Reader + ArchiveSpanWriter spanstore.Writer + DependencyWriter dependencystore.Writer + DependencyReader dependencystore.Reader + SamplingStore samplingstore.Store + Fixtures []*QueryFixtures // TODO: remove this after all storage backends return spanKind from GetOperations GetOperationsMissingSpanKind bool // TODO: remove this after all storage backends return Source column from GetDependencies + GetDependenciesReturnsSource bool + // Skip Archive Test if not supported by the storage backend + SkipArchiveTest bool + + // TODO: remove this after upstream issue in OTEL jaeger translator is fixed + // Skip testing trace binary tags, logs, and process + SkipBinaryAttrs bool + // List of tests which has to be skipped, it can be regex too. SkipList []string // CleanUp() should ensure that the storage backend is clean before another test. // called either before or after each test, and should be idempotent - CleanUp func() error - - // Refresh() should ensure that the storage backend is up to date before being queried. - // called between set-up and queries in each test - Refresh func() error + CleanUp func(t *testing.T) } // === SpanStore Integration Tests === @@ -90,12 +104,17 @@ type QueryFixtures struct { func (s *StorageIntegration) cleanUp(t *testing.T) { require.NotNil(t, s.CleanUp, "CleanUp function must be provided") - require.NoError(t, s.CleanUp()) + s.CleanUp(t) } -func (s *StorageIntegration) refresh(t *testing.T) { - require.NotNil(t, s.Refresh, "Refresh function must be provided") - require.NoError(t, s.Refresh()) +func SkipUnlessEnv(t *testing.T, storage ...string) { + env := os.Getenv("STORAGE") + for _, s := range storage { + if env == s { + return + } + } + t.Skipf("This test requires environment variable STORAGE=%s", strings.Join(storage, "|")) } func (s *StorageIntegration) skipIfNeeded(t *testing.T) { @@ -127,7 +146,6 @@ func (s *StorageIntegration) testGetServices(t *testing.T) { expected := []string{"example-service-1", "example-service-2", "example-service-3"} s.loadParseAndWriteExampleTrace(t) - s.refresh(t) var actual []string found := s.waitForCondition(t, func(t *testing.T) bool { @@ -144,6 +162,34 @@ func (s *StorageIntegration) testGetServices(t *testing.T) { } } +func (s *StorageIntegration) testArchiveTrace(t *testing.T) { + s.skipIfNeeded(t) + if s.SkipArchiveTest { + t.Skip("Skipping ArchiveTrace test because archive reader or writer is nil") + } + defer s.cleanUp(t) + tID := model.NewTraceID(uint64(11), uint64(22)) + expected := &model.Span{ + OperationName: "archive_span", + StartTime: time.Now().Add(-time.Hour * 72 * 5).Truncate(time.Microsecond), + TraceID: tID, + SpanID: model.NewSpanID(55), + References: []model.SpanRef{}, + Process: model.NewProcess("archived_service", model.KeyValues{}), + } + + require.NoError(t, s.ArchiveSpanWriter.WriteSpan(context.Background(), expected)) + + var actual *model.Trace + found := s.waitForCondition(t, func(t *testing.T) bool { + var err error + actual, err = s.ArchiveSpanReader.GetTrace(context.Background(), tID) + return err == nil && len(actual.Spans) == 1 + }) + require.True(t, found) + CompareTraces(t, &model.Trace{Spans: []*model.Span{expected}}, actual) +} + func (s *StorageIntegration) 
testGetLargeSpan(t *testing.T) { s.skipIfNeeded(t) defer s.cleanUp(t) @@ -151,7 +197,6 @@ func (s *StorageIntegration) testGetLargeSpan(t *testing.T) { t.Log("Testing Large Trace over 10K ...") expected := s.loadParseAndWriteLargeTrace(t) expectedTraceID := expected.Spans[0].TraceID - s.refresh(t) var actual *model.Trace found := s.waitForCondition(t, func(t *testing.T) bool { @@ -183,7 +228,6 @@ func (s *StorageIntegration) testGetOperations(t *testing.T) { } } s.loadParseAndWriteExampleTrace(t) - s.refresh(t) var actual []spanstore.Operation found := s.waitForCondition(t, func(t *testing.T) bool { @@ -209,7 +253,6 @@ func (s *StorageIntegration) testGetTrace(t *testing.T) { expected := s.loadParseAndWriteExampleTrace(t) expectedTraceID := expected.Spans[0].TraceID - s.refresh(t) var actual *model.Trace found := s.waitForCondition(t, func(t *testing.T) bool { @@ -225,7 +268,7 @@ func (s *StorageIntegration) testGetTrace(t *testing.T) { } t.Run("NotFound error", func(t *testing.T) { - fakeTraceID := model.TraceID{High: 0, Low: 0} + fakeTraceID := model.TraceID{High: 0, Low: 1} trace, err := s.SpanReader.GetTrace(context.Background(), fakeTraceID) assert.Equal(t, spanstore.ErrTraceNotFound, err) assert.Nil(t, trace) @@ -249,15 +292,13 @@ func (s *StorageIntegration) testFindTraces(t *testing.T) { trace, ok := allTraceFixtures[traceFixture] if !ok { trace = s.getTraceFixture(t, traceFixture) - err := s.writeTrace(t, trace) - require.NoError(t, err, "Unexpected error when writing trace %s to storage", traceFixture) + s.writeTrace(t, trace) allTraceFixtures[traceFixture] = trace } expected = append(expected, trace) } expectedTracesPerTestCase = append(expectedTracesPerTestCase, expected) } - s.refresh(t) for i, queryTestCase := range s.Fixtures { t.Run(queryTestCase.Caption, func(t *testing.T) { s.skipIfNeeded(t) @@ -275,29 +316,29 @@ func (s *StorageIntegration) findTracesByQuery(t *testing.T, query *spanstore.Tr traces, err = s.SpanReader.FindTraces(context.Background(), query) require.NoError(t, err) if len(expected) != len(traces) { - t.Logf("FindTraces: expected: %d, actual: %d", len(expected), len(traces)) + t.Logf("Expecting certain number of traces: expected: %d, actual: %d", len(expected), len(traces)) + return false + } + if spanCount(expected) != spanCount(traces) { + t.Logf("Expecting certain number of spans: expected: %d, actual: %d", spanCount(expected), spanCount(traces)) return false } return true }) require.True(t, found) - tracesMatch(t, traces, expected) return traces } -func (s *StorageIntegration) writeTrace(t *testing.T, trace *model.Trace) error { +func (s *StorageIntegration) writeTrace(t *testing.T, trace *model.Trace) { for _, span := range trace.Spans { - if err := s.SpanWriter.WriteSpan(context.Background(), span); err != nil { - return err - } + err := s.SpanWriter.WriteSpan(context.Background(), span) + require.NoError(t, err, "Not expecting error when writing trace to storage") } - return nil } func (s *StorageIntegration) loadParseAndWriteExampleTrace(t *testing.T) *model.Trace { trace := s.getTraceFixture(t, "example_trace") - err := s.writeTrace(t, trace) - require.NoError(t, err, "Not expecting error when writing example_trace to storage") + s.writeTrace(t, trace) return trace } @@ -314,14 +355,45 @@ func (s *StorageIntegration) loadParseAndWriteLargeTrace(t *testing.T) *model.Tr s.StartTime = s.StartTime.Add(time.Second * time.Duration(i+1)) trace.Spans = append(trace.Spans, s) } - err := s.writeTrace(t, trace) - require.NoError(t, err, "Not expecting error
when writing example_trace to storage") + s.writeTrace(t, trace) return trace } func (s *StorageIntegration) getTraceFixture(t *testing.T, fixture string) *model.Trace { fileName := fmt.Sprintf("fixtures/traces/%s.json", fixture) - return getTraceFixtureExact(t, fileName) + trace := getTraceFixtureExact(t, fileName) + + if s.SkipBinaryAttrs { + t.Logf("Dropped binary type attributes from trace ID: %s", trace.Spans[0].TraceID.String()) + trace = s.dropBinaryAttrs(t, trace) + } + + return trace +} + +func (s *StorageIntegration) dropBinaryAttrs(t *testing.T, trace *model.Trace) *model.Trace { + for _, span := range trace.Spans { + span.Tags = s.dropBinaryTags(t, span.Tags) + span.Process.Tags = s.dropBinaryTags(t, span.Process.Tags) + + for i := range span.Logs { + span.Logs[i].Fields = s.dropBinaryTags(t, span.Logs[i].Fields) + } + } + + return trace +} + +func (s *StorageIntegration) dropBinaryTags(_ *testing.T, tags []model.KeyValue) []model.KeyValue { + newTags := make([]model.KeyValue, 0) + for _, tag := range tags { + if tag.VType == model.ValueType_BINARY { + continue + } + newTags = append(newTags, tag) + } + + return newTags } func getTraceFixtureExact(t *testing.T, fileName string) *model.Trace { @@ -364,13 +436,6 @@ func correctTime(json []byte) []byte { return []byte(retString) } -func tracesMatch(t *testing.T, actual []*model.Trace, expected []*model.Trace) bool { - if !assert.Equal(t, len(expected), len(actual), "Expecting certain number of traces") { - return false - } - return assert.Equal(t, spanCount(expected), spanCount(actual), "Expecting certain number of spans") -} - func spanCount(traces []*model.Trace) int { var count int for _, trace := range traces { @@ -411,13 +476,22 @@ func (s *StorageIntegration) testGetDependencies(t *testing.T) { } require.NoError(t, s.DependencyWriter.WriteDependencies(time.Now(), expected)) - s.refresh(t) - actual, err := s.DependencyReader.GetDependencies(context.Background(), time.Now(), 5*time.Minute) - require.NoError(t, err) - sort.Slice(actual, func(i, j int) bool { - return actual[i].Parent < actual[j].Parent + + var actual []model.DependencyLink + found := s.waitForCondition(t, func(t *testing.T) bool { + var err error + actual, err = s.DependencyReader.GetDependencies(context.Background(), time.Now(), 5*time.Minute) + require.NoError(t, err) + sort.Slice(actual, func(i, j int) bool { + return actual[i].Parent < actual[j].Parent + }) + return assert.ObjectsAreEqualValues(expected, actual) }) - assert.EqualValues(t, expected, actual) + + if !assert.True(t, found) { + t.Log("\t Expected:", expected) + t.Log("\t Actual :", actual) + } } // === Sampling Store Integration Tests === @@ -478,14 +552,20 @@ func (s *StorageIntegration) insertThroughput(t *testing.T) { require.NoError(t, err) } -// IntegrationTestAll runs all integration tests -func (s *StorageIntegration) IntegrationTestAll(t *testing.T) { +// RunAll runs all integration tests +func (s *StorageIntegration) RunAll(t *testing.T) { + s.RunSpanStoreTests(t) + t.Run("ArchiveTrace", s.testArchiveTrace) + t.Run("GetDependencies", s.testGetDependencies) + t.Run("GetThroughput", s.testGetThroughput) + t.Run("GetLatestProbability", s.testGetLatestProbability) +} + +// RunSpanStoreTests runs only span-related integration tests +func (s *StorageIntegration) RunSpanStoreTests(t *testing.T) { t.Run("GetServices", s.testGetServices) t.Run("GetOperations", s.testGetOperations) t.Run("GetTrace", s.testGetTrace) t.Run("GetLargeSpans", s.testGetLargeSpan) t.Run("FindTraces", s.testFindTraces) -
t.Run("GetDependencies", s.testGetDependencies) - t.Run("GetThroughput", s.testGetThroughput) - t.Run("GetLatestProbability", s.testGetLatestProbability) } diff --git a/plugin/storage/integration/kafka_test.go b/plugin/storage/integration/kafka_test.go index 9026631c031..9a315beb18b 100644 --- a/plugin/storage/integration/kafka_test.go +++ b/plugin/storage/integration/kafka_test.go @@ -16,7 +16,6 @@ package integration import ( "context" - "os" "strconv" "testing" "time" @@ -44,7 +43,7 @@ type KafkaIntegrationTestSuite struct { logger *zap.Logger } -func (s *KafkaIntegrationTestSuite) initialize() error { +func (s *KafkaIntegrationTestSuite) initialize(t *testing.T) { s.logger, _ = testutils.NewLogger() const encoding = "json" const groupID = "kafka-integration-test" @@ -62,17 +61,13 @@ func (s *KafkaIntegrationTestSuite) initialize() error { "--kafka.producer.encoding", encoding, }) - if err != nil { - return err - } + require.NoError(t, err) f.InitFromViper(v, zap.NewNop()) - if err := f.Initialize(metrics.NullFactory, s.logger); err != nil { - return err - } + err = f.Initialize(metrics.NullFactory, s.logger) + require.NoError(t, err) + spanWriter, err := f.CreateSpanWriter() - if err != nil { - return err - } + require.NoError(t, err) v, command = config.Viperize(app.AddFlags) err = command.ParseFlags([]string{ @@ -89,9 +84,7 @@ func (s *KafkaIntegrationTestSuite) initialize() error { "--ingester.parallelism", "1000", }) - if err != nil { - return err - } + require.NoError(t, err) options := app.Options{ Configuration: consumer.Configuration{ InitialOffset: sarama.OffsetOldest, @@ -100,16 +93,13 @@ func (s *KafkaIntegrationTestSuite) initialize() error { options.InitFromViper(v) traceStore := memory.NewStore() spanConsumer, err := builder.CreateConsumer(s.logger, metrics.NullFactory, traceStore, options) - if err != nil { - return err - } + require.NoError(t, err) spanConsumer.Start() s.SpanWriter = spanWriter s.SpanReader = &ingester{traceStore} - s.Refresh = func() error { return nil } - s.CleanUp = func() error { return nil } - return nil + s.CleanUp = func(_ *testing.T) {} + s.SkipArchiveTest = true } // The ingester consumes spans from kafka and writes them to an in-memory traceStore @@ -141,10 +131,8 @@ func (r *ingester) FindTraceIDs(ctx context.Context, query *spanstore.TraceQuery } func TestKafkaStorage(t *testing.T) { - if os.Getenv("STORAGE") != "kafka" { - t.Skip("Integration test against kafka skipped; set STORAGE env var to kafka to run this") - } + SkipUnlessEnv(t, "kafka") s := &KafkaIntegrationTestSuite{} - require.NoError(t, s.initialize()) + s.initialize(t) t.Run("GetTrace", s.testGetTrace) } diff --git a/plugin/storage/integration/memstore_test.go b/plugin/storage/integration/memstore_test.go index e470a935b19..b55bbea7462 100644 --- a/plugin/storage/integration/memstore_test.go +++ b/plugin/storage/integration/memstore_test.go @@ -12,15 +12,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build memory_storage_integration -// +build memory_storage_integration package integration import ( "testing" - "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/jaegertracing/jaeger/pkg/testutils" @@ -32,31 +29,25 @@ type MemStorageIntegrationTestSuite struct { logger *zap.Logger } -func (s *MemStorageIntegrationTestSuite) initialize() error { +func (s *MemStorageIntegrationTestSuite) initialize(_ *testing.T) { s.logger, _ = testutils.NewLogger() store := memory.NewStore() + archiveStore := memory.NewStore() s.SamplingStore = memory.NewSamplingStore(2) s.SpanReader = store s.SpanWriter = store + s.ArchiveSpanReader = archiveStore + s.ArchiveSpanWriter = archiveStore // TODO DependencyWriter is not implemented in memory store - s.Refresh = s.refresh - s.CleanUp = s.cleanUp - return nil -} - -func (s *MemStorageIntegrationTestSuite) refresh() error { - return nil -} - -func (s *MemStorageIntegrationTestSuite) cleanUp() error { - return s.initialize() + s.CleanUp = s.initialize } func TestMemoryStorage(t *testing.T) { + SkipUnlessEnv(t, "memory") s := &MemStorageIntegrationTestSuite{} - require.NoError(t, s.initialize()) - s.IntegrationTestAll(t) + s.initialize(t) + s.RunAll(t) } diff --git a/plugin/storage/integration/remote_memory_storage.go b/plugin/storage/integration/remote_memory_storage.go new file mode 100644 index 00000000000..a15020e4ae5 --- /dev/null +++ b/plugin/storage/integration/remote_memory_storage.go @@ -0,0 +1,55 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/jaegertracing/jaeger/cmd/remote-storage/app" + "github.com/jaegertracing/jaeger/pkg/config" + "github.com/jaegertracing/jaeger/pkg/healthcheck" + "github.com/jaegertracing/jaeger/pkg/metrics" + "github.com/jaegertracing/jaeger/pkg/tenancy" + "github.com/jaegertracing/jaeger/plugin/storage" + "github.com/jaegertracing/jaeger/ports" +) + +type RemoteMemoryStorage struct { + server *app.Server + storageFactory *storage.Factory +} + +func StartNewRemoteMemoryStorage(t *testing.T, logger *zap.Logger) *RemoteMemoryStorage { + opts := &app.Options{ + GRPCHostPort: ports.PortToHostPort(ports.RemoteStorageGRPC), + Tenancy: tenancy.Options{ + Enabled: false, + }, + } + tm := tenancy.NewManager(&opts.Tenancy) + storageFactory, err := storage.NewFactory(storage.FactoryConfigFromEnvAndCLI(os.Args, os.Stderr)) + require.NoError(t, err) + + v, _ := config.Viperize(storageFactory.AddFlags) + storageFactory.InitFromViper(v, logger) + require.NoError(t, storageFactory.Initialize(metrics.NullFactory, logger)) + + server, err := app.NewServer(opts, storageFactory, tm, logger, healthcheck.New()) + require.NoError(t, err) + require.NoError(t, server.Start()) + + return &RemoteMemoryStorage{ + server: server, + storageFactory: storageFactory, + } +} + +func (s *RemoteMemoryStorage) Close(t *testing.T) { + require.NoError(t, s.server.Close()) + require.NoError(t, s.storageFactory.Close()) +} diff --git a/plugin/storage/kafka/options.go b/plugin/storage/kafka/options.go index 02a45c638b2..635e7526d63 100644 --- a/plugin/storage/kafka/options.go +++ b/plugin/storage/kafka/options.go @@ -47,6 +47,7 @@ const ( suffixBatchSize = ".batch-size" suffixBatchMinMessages = ".batch-min-messages" suffixBatchMaxMessages = ".batch-max-messages" + suffixMaxMessageBytes = ".max-message-bytes" defaultBroker = "127.0.0.1:9092" defaultTopic = "jaeger-spans" @@ -58,6 
+59,7 @@ const ( defaultBatchSize = 0 defaultBatchMinMessages = 0 defaultBatchMaxMessages = 0 + defaultMaxMessageBytes = 1000000 // https://github.com/IBM/sarama/blob/main/config.go#L177 ) var ( @@ -152,6 +154,11 @@ func (opt *Options) AddFlags(flagSet *flag.FlagSet) { defaultBatchMaxMessages, "(experimental) Maximum number of message to batch before sending records to Kafka", ) + flagSet.Int( + configPrefix+suffixMaxMessageBytes, + defaultMaxMessageBytes, + "(experimental) The maximum permitted size of a message. Should be set equal to or smaller than the broker's `message.max.bytes`.", + ) flagSet.String( configPrefix+suffixBrokers, defaultBroker, @@ -207,6 +214,7 @@ func (opt *Options) InitFromViper(v *viper.Viper) { BatchSize: v.GetInt(configPrefix + suffixBatchSize), BatchMinMessages: v.GetInt(configPrefix + suffixBatchMinMessages), BatchMaxMessages: v.GetInt(configPrefix + suffixBatchMaxMessages), + MaxMessageBytes: v.GetInt(configPrefix + suffixMaxMessageBytes), } opt.Topic = v.GetString(configPrefix + suffixTopic) opt.Encoding = v.GetString(configPrefix + suffixEncoding) diff --git a/plugin/storage/kafka/options_test.go b/plugin/storage/kafka/options_test.go index c34bd70a82e..790cbab35ee 100644 --- a/plugin/storage/kafka/options_test.go +++ b/plugin/storage/kafka/options_test.go @@ -42,6 +42,7 @@ func TestOptionsWithFlags(t *testing.T) { "--kafka.producer.batch-size=128000", "--kafka.producer.batch-min-messages=50", "--kafka.producer.batch-max-messages=100", + "--kafka.producer.max-message-bytes=10485760", }) opts.InitFromViper(v) @@ -55,6 +56,7 @@ func TestOptionsWithFlags(t *testing.T) { assert.Equal(t, time.Duration(1*time.Second), opts.Config.BatchLinger) assert.Equal(t, 50, opts.Config.BatchMinMessages) assert.Equal(t, 100, opts.Config.BatchMaxMessages) + assert.Equal(t, 10485760, opts.Config.MaxMessageBytes) } func TestFlagDefaults(t *testing.T) { @@ -73,6 +76,7 @@ func TestFlagDefaults(t *testing.T) { assert.Equal(t, time.Duration(0*time.Second), opts.Config.BatchLinger) assert.Equal(t, 0, opts.Config.BatchMinMessages) assert.Equal(t, 0, opts.Config.BatchMaxMessages) + assert.Equal(t, defaultMaxMessageBytes, opts.Config.MaxMessageBytes) } func TestCompressionLevelDefaults(t *testing.T) { diff --git a/scripts/check-goleak-files.sh b/scripts/check-goleak-files.sh index 278db449139..47d61295a01 100755 --- a/scripts/check-goleak-files.sh +++ b/scripts/check-goleak-files.sh @@ -3,6 +3,7 @@ set -euo pipefail bad_pkgs=0 +failed_pkgs=0 # shellcheck disable=SC2048 for dir in $*; do @@ -12,10 +13,10 @@ for dir in $*; do testFiles=$(find "${dir}" -maxdepth 1 -name '*_test.go') if [[ -z "$testFiles" ]]; then continue - fi + fi good=0 for test in ${testFiles}; do - if grep -q "TestMain" "${test}" | grep -q "testutils.VerifyGoLeaks" "${test}"; then + if grep -q "TestMain" "${test}" && grep -q "testutils.VerifyGoLeaks" "${test}"; then good=1 break fi @@ -23,12 +24,24 @@ for dir in $*; do if ((good == 0)); then echo "🔴 Error(check-goleak): no goleak check in package ${dir}" ((bad_pkgs+=1)) + if [[ "${dir}" == "./cmd/jaeger/internal/integration/" || "${dir}" == "./plugin/storage/integration/" ]]; then + echo " this package is temporarily allowed and will not cause linter failure" + else + ((failed_pkgs+=1)) + fi fi done -if ((bad_pkgs > 0)); then - echo "Error(check-goleak): no goleak check in ${bad_pkgs} package(s)." - echo "See https://github.com/jaegertracing/jaeger/pull/5010/files for example of adding the checks."
- echo "In the future this will be a fatal error in the CI." - exit 0 # TODO change to 1 in the future +function help() { + echo " See https://github.com/jaegertracing/jaeger/pull/5010/files" + echo " for examples of adding the checks." +} + +if ((failed_pkgs > 0)); then + echo "⛔ Fatal(check-goleak): no goleak check in ${bad_pkgs} package(s), ${failed_pkgs} of which not allowed." + help + exit 1 +elif ((bad_pkgs > 0)); then + echo "🐞 Warning(check-goleak): no goleak check in ${bad_pkgs} package(s)." + help fi diff --git a/scripts/release-notes.py b/scripts/release-notes.py index a30fd7cfb2d..2d2f67a9a95 100755 --- a/scripts/release-notes.py +++ b/scripts/release-notes.py @@ -156,13 +156,20 @@ def main(token, repo, num_commits, exclude_dependabot): # Print categorized pull requests print() + print('### Backend Changes') + print() for category, results in category_results.items(): if results and category: - print(f'{category}:\n') + print(f'{category}\n') for result in results: print(result) print() + print() + print('### 📊 UI Changes') + print() + print('* UI pinned to version [x.y.z](https://github.com/jaegertracing/jaeger-ui/blob/main/CHANGELOG.md#---ANCHOR---).') + # Print pull requests in the 'UNCATTEGORIZED' category if other_results: print(f'#### 💩💩💩 The following commits cannot be categorized (missing changeglog labels):\n') @@ -180,7 +187,7 @@ def main(token, repo, num_commits, exclude_dependabot): print() if skipped_dependabot: - print(f"(Skipped {skipped_dependabot} dependabot commit{'' if skipped_dependabot == 1 else 's'})") + print(f"(Skipped dependabot commits: {skipped_dependabot})") def get_pull_request_labels(token, repo, pull_number): diff --git a/storage/factory.go b/storage/factory.go index f6d3394840b..b56e6fdc07f 100644 --- a/storage/factory.go +++ b/storage/factory.go @@ -23,7 +23,7 @@ import ( "github.com/jaegertracing/jaeger/pkg/distributedlock" "github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/storage/dependencystore" - metricsstore "github.com/jaegertracing/jaeger/storage/metricsstore" + "github.com/jaegertracing/jaeger/storage/metricsstore" "github.com/jaegertracing/jaeger/storage/samplingstore" "github.com/jaegertracing/jaeger/storage/spanstore" ) @@ -49,6 +49,13 @@ type Factory interface { CreateDependencyReader() (dependencystore.Reader, error) } +// Purger defines an interface that is capable of purging the storage. +// Only meant to be used from integration tests. +type Purger interface { + // Purge removes all data from the storage. + Purge() error +} + // SamplingStoreFactory defines an interface that is capable of returning the necessary backends for // adaptive sampling. type SamplingStoreFactory interface { diff --git a/storage/spanstore/downsampling_writer_test.go b/storage/spanstore/downsampling_writer_test.go index 42c3b0a7c70..aaa085c69eb 100644 --- a/storage/spanstore/downsampling_writer_test.go +++ b/storage/spanstore/downsampling_writer_test.go @@ -70,6 +70,7 @@ func TestDownSamplingWriter_hashBytes(t *testing.T) { } c := NewDownsamplingWriter(&noopWriteSpanStore{}, downsamplingOptions) h := c.sampler.hasherPool.Get().(*hasher) + //nolint:testifylint assert.Equal(t, h.hashBytes(), h.hashBytes()) c.sampler.hasherPool.Put(h) trace := model.TraceID{ @@ -81,6 +82,7 @@ func TestDownSamplingWriter_hashBytes(t *testing.T) { } _, _ = span.TraceID.MarshalTo(h.buffer) // Same traceID should always be hashed to same uint64 in DownSamplingWriter. + //nolint:testifylint assert.Equal(t, h.hashBytes(), h.hashBytes()) }