diff --git a/.codecov.yml b/.codecov.yml
index 248475af4b933..3b23a3da8bf86 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -186,6 +186,10 @@ coverage:
         target: 75
         flags:
         - druid
+      DuckDB:
+        target: 75
+        flags:
+        - duckdb
       EKS_Fargate:
         target: 75
         flags:
@@ -310,6 +314,10 @@ coverage:
         target: 75
         flags:
         - karpenter
+      Keda:
+        target: 75
+        flags:
+        - keda
       Kong:
         target: 75
         flags:
@@ -410,6 +418,10 @@ coverage:
         target: 75
         flags:
         - mesos_master
+      Milvus:
+        target: 75
+        flags:
+        - milvus
       MongoDB:
         target: 75
         flags:
@@ -582,6 +594,10 @@ coverage:
         target: 75
         flags:
         - strimzi
+      Supabase:
+        target: 75
+        flags:
+        - supabase
       Supervisord:
         target: 75
         flags:
@@ -962,6 +978,11 @@ flags:
     paths:
     - druid/datadog_checks/druid
     - druid/tests
+  duckdb:
+    carryforward: true
+    paths:
+    - duckdb/datadog_checks/duckdb
+    - duckdb/tests
   ecs_fargate:
     carryforward: true
     paths:
@@ -1132,6 +1153,11 @@ flags:
     paths:
     - karpenter/datadog_checks/karpenter
     - karpenter/tests
+  keda:
+    carryforward: true
+    paths:
+    - keda/datadog_checks/keda
+    - keda/tests
   kong:
     carryforward: true
     paths:
@@ -1262,6 +1288,11 @@ flags:
     paths:
     - mesos_slave/datadog_checks/mesos_slave
     - mesos_slave/tests
+  milvus:
+    carryforward: true
+    paths:
+    - milvus/datadog_checks/milvus
+    - milvus/tests
   mongo:
     carryforward: true
     paths:
@@ -1482,6 +1513,11 @@ flags:
     paths:
     - strimzi/datadog_checks/strimzi
     - strimzi/tests
+  supabase:
+    carryforward: true
+    paths:
+    - supabase/datadog_checks/supabase
+    - supabase/tests
   supervisord:
     carryforward: true
     paths:
diff --git a/.github/workflows/config/labeler.yml b/.github/workflows/config/labeler.yml
index 1978013e24976..6084caed5feff 100644
--- a/.github/workflows/config/labeler.yml
+++ b/.github/workflows/config/labeler.yml
@@ -55,6 +55,8 @@ integration/amazon_msk:
 - amazon_msk/**/*
 integration/ambari:
 - ambari/**/*
+integration/anthropic:
+- anthropic/**/*
 integration/anyscale:
 - anyscale/**/*
 integration/apache:
@@ -175,6 +177,8 @@ integration/dotnetclr:
 - dotnetclr/**/*
 integration/druid:
 - druid/**/*
+integration/duckdb:
+- duckdb/**/*
 integration/ecs_fargate:
 - ecs_fargate/**/*
 integration/eks_anywhere:
@@ -285,6 +289,8 @@ integration/kafka_consumer:
 - kafka_consumer/**/*
 integration/karpenter:
 - karpenter/**/*
+integration/keda:
+- keda/**/*
 integration/kong:
 - kong/**/*
 integration/kube_apiserver_metrics:
@@ -325,8 +331,6 @@ integration/kyverno:
 - kyverno/**/*
 integration/langchain:
 - langchain/**/*
-integration/anthropic:
-- anthropic/**/*
 integration/lastpass:
 - lastpass/**/*
 integration/lighttpd:
@@ -353,6 +357,8 @@ integration/mesos_slave:
 - mesos_slave/**/*
 integration/metabase:
 - metabase/**/*
+integration/milvus:
+- milvus/**/*
 integration/mimecast:
 - mimecast/**/*
 integration/mongo:
@@ -521,6 +527,8 @@ integration/streamnative:
 - streamnative/**/*
 integration/strimzi:
 - strimzi/**/*
+integration/supabase:
+- supabase/**/*
 integration/supervisord:
 - supervisord/**/*
 integration/suricata:
diff --git a/.github/workflows/test-all.yml b/.github/workflows/test-all.yml
index d1701ec09df8b..4d0f92ea74c8b 100644
--- a/.github/workflows/test-all.yml
+++ b/.github/workflows/test-all.yml
@@ -1154,6 +1154,26 @@ jobs:
       minimum-base-package: ${{ inputs.minimum-base-package }}
       pytest-args: ${{ inputs.pytest-args }}
     secrets: inherit
+  j9eb6aa6:
+    uses: ./.github/workflows/test-target.yml
+    with:
+      job-name: DuckDB
+      target: duckdb
+      platform: linux
+      runner: '["ubuntu-22.04"]'
+      repo: "${{ inputs.repo }}"
+      python-version: "${{ inputs.python-version }}"
+      standard: ${{ inputs.standard }}
+      latest: ${{ inputs.latest }}
+      agent-image: "${{ inputs.agent-image }}"
+      agent-image-py2: "${{ inputs.agent-image-py2 }}"
+      agent-image-windows: "${{ inputs.agent-image-windows }}"
+      agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}"
+      test-py2: ${{ inputs.test-py2 }}
+      test-py3: ${{ inputs.test-py3 }}
+      minimum-base-package: ${{ inputs.minimum-base-package }}
+      pytest-args: ${{ inputs.pytest-args }}
+    secrets: inherit
   j562bfe5:
     uses: ./.github/workflows/test-target.yml
     with:
@@ -1974,6 +1994,26 @@ jobs:
       minimum-base-package: ${{ inputs.minimum-base-package }}
       pytest-args: ${{ inputs.pytest-args }}
     secrets: inherit
+  j2cb7ef0:
+    uses: ./.github/workflows/test-target.yml
+    with:
+      job-name: Keda
+      target: keda
+      platform: linux
+      runner: '["ubuntu-22.04"]'
+      repo: "${{ inputs.repo }}"
+      python-version: "${{ inputs.python-version }}"
+      standard: ${{ inputs.standard }}
+      latest: ${{ inputs.latest }}
+      agent-image: "${{ inputs.agent-image }}"
+      agent-image-py2: "${{ inputs.agent-image-py2 }}"
+      agent-image-windows: "${{ inputs.agent-image-windows }}"
+      agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}"
+      test-py2: ${{ inputs.test-py2 }}
+      test-py3: ${{ inputs.test-py3 }}
+      minimum-base-package: ${{ inputs.minimum-base-package }}
+      pytest-args: ${{ inputs.pytest-args }}
+    secrets: inherit
   jaee58c5:
     uses: ./.github/workflows/test-target.yml
     with:
@@ -2494,6 +2534,26 @@ jobs:
       minimum-base-package: ${{ inputs.minimum-base-package }}
       pytest-args: ${{ inputs.pytest-args }}
     secrets: inherit
+  j062aeb0:
+    uses: ./.github/workflows/test-target.yml
+    with:
+      job-name: Milvus
+      target: milvus
+      platform: linux
+      runner: '["ubuntu-22.04"]'
+      repo: "${{ inputs.repo }}"
+      python-version: "${{ inputs.python-version }}"
+      standard: ${{ inputs.standard }}
+      latest: ${{ inputs.latest }}
+      agent-image: "${{ inputs.agent-image }}"
+      agent-image-py2: "${{ inputs.agent-image-py2 }}"
+      agent-image-windows: "${{ inputs.agent-image-windows }}"
+      agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}"
+      test-py2: ${{ inputs.test-py2 }}
+      test-py3: ${{ inputs.test-py3 }}
+      minimum-base-package: ${{ inputs.minimum-base-package }}
+      pytest-args: ${{ inputs.pytest-args }}
+    secrets: inherit
   j91231ff:
     uses: ./.github/workflows/test-target.yml
     with:
@@ -3454,6 +3514,26 @@ jobs:
       minimum-base-package: ${{ inputs.minimum-base-package }}
       pytest-args: ${{ inputs.pytest-args }}
     secrets: inherit
+  jfe7ceb7:
+    uses: ./.github/workflows/test-target.yml
+    with:
+      job-name: Supabase
+      target: supabase
+      platform: linux
+      runner: '["ubuntu-22.04"]'
+      repo: "${{ inputs.repo }}"
+      python-version: "${{ inputs.python-version }}"
+      standard: ${{ inputs.standard }}
+      latest: ${{ inputs.latest }}
+      agent-image: "${{ inputs.agent-image }}"
+      agent-image-py2: "${{ inputs.agent-image-py2 }}"
+      agent-image-windows: "${{ inputs.agent-image-windows }}"
+      agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}"
+      test-py2: ${{ inputs.test-py2 }}
+      test-py3: ${{ inputs.test-py3 }}
+      minimum-base-package: ${{ inputs.minimum-base-package }}
+      pytest-args: ${{ inputs.pytest-args }}
+    secrets: inherit
   jf04a052:
     uses: ./.github/workflows/test-target.yml
     with:
diff --git a/.github/workflows/test-fips.yml b/.github/workflows/test-fips.yml
new file mode 100644
index 0000000000000..663bf7b45b273
--- /dev/null
+++ b/.github/workflows/test-fips.yml
@@ -0,0 +1,151 @@
+name: Test FIPS E2E
+
+on:
+  workflow_dispatch:
+    inputs:
+      agent-image:
+        description: "Agent image to use"
+        required: false
+        type: string
+      target:
+        description: "Target to test"
+        required: false
+        type: string
+  pull_request:
+    paths:
+      - datadog_checks_base/datadog_checks/**
+  schedule:
+    - cron: '0 0,8,16 * * *'
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  run:
+    name: "Test FIPS"
+    runs-on: ["ubuntu-22.04"]
+
+    env:
+      FORCE_COLOR: "1"
+      PYTHON_VERSION: "3.12"
+      DDEV_E2E_AGENT: "${{ inputs.agent-image || 'datadog/agent-dev:master-fips' }}"
+      # Test results for later processing
+      TEST_RESULTS_BASE_DIR: "test-results"
+      # Tracing to monitor our test suite
+      DD_ENV: "ci"
+      DD_SERVICE: "ddev-integrations-core"
+      DD_TAGS: "team:agent-integrations"
+      DD_TRACE_ANALYTICS_ENABLED: "true"
+      # Capture traces for a separate job to do the submission
+      TRACE_CAPTURE_BASE_DIR: "trace-captures"
+      TRACE_CAPTURE_LOG: "trace-captures/output.log"
+
+    steps:
+
+    - name: Set environment variables with sanitized paths
+      run: |
+        JOB_NAME="test-fips"
+
+        echo "TEST_RESULTS_DIR=$TEST_RESULTS_BASE_DIR/$JOB_NAME" >> $GITHUB_ENV
+        echo "TRACE_CAPTURE_FILE=$TRACE_CAPTURE_BASE_DIR/$JOB_NAME" >> $GITHUB_ENV
+
+    - uses: actions/checkout@v4
+
+    - name: Set up Python ${{ env.PYTHON_VERSION }}
+      uses: actions/setup-python@v5
+      with:
+        python-version: "${{ env.PYTHON_VERSION }}"
+        cache: 'pip'
+
+    - name: Restore cache
+      uses: actions/cache/restore@v4
+      with:
+        path: '~/.cache/pip'
+        key: >-
+          ${{ format(
+            'v01-python-{0}-{1}-{2}-{3}',
+            env.pythonLocation,
+            hashFiles('datadog_checks_base/pyproject.toml'),
+            hashFiles('datadog_checks_dev/pyproject.toml'),
+            hashFiles('ddev/pyproject.toml')
+          )}}
+        restore-keys: |-
+          v01-python-${{ env.pythonLocation }}
+
+    - name: Install ddev from local folder
+      run: |-
+        pip install -e ./datadog_checks_dev[cli]
+        pip install -e ./ddev
+
+    - name: Configure ddev
+      run: |-
+        ddev config set repos.core .
+        ddev config set repo core
+
+    - name: Prepare for testing
+      env:
+        PYTHONUNBUFFERED: "1"
+        DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+        DOCKER_ACCESS_TOKEN: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+        ORACLE_DOCKER_USERNAME: ${{ secrets.ORACLE_DOCKER_USERNAME }}
+        ORACLE_DOCKER_PASSWORD: ${{ secrets.ORACLE_DOCKER_PASSWORD }}
+        SINGLESTORE_LICENSE: ${{ secrets.SINGLESTORE_LICENSE }}
+        DD_GITHUB_USER: ${{ github.actor }}
+        DD_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: ddev ci setup ${{ inputs.target || 'tls' }}
+
+    - name: Set up trace capturing
+      env:
+        PYTHONUNBUFFERED: "1"
+      run: |-
+        mkdir "${{ env.TRACE_CAPTURE_BASE_DIR }}"
+        python .ddev/ci/scripts/traces.py capture --port "8126" --record-file "${{ env.TRACE_CAPTURE_FILE }}" > "${{ env.TRACE_CAPTURE_LOG }}" 2>&1 &
+
+    - name: Run E2E tests with FIPS disabled
+      env:
+        DD_API_KEY: "${{ secrets.DD_API_KEY }}"
+      run: |
+        ddev env test -e GOFIPS=0 --new-env --junit ${{ inputs.target || 'tls' }} -- all -m "fips_off"
+
+    - name: Run E2E tests with FIPS enabled
+      env:
+        DD_API_KEY: "${{ secrets.DD_API_KEY }}"
+      run: |
+        ddev env test -e GOFIPS=1 --new-env --junit ${{ inputs.target || 'tls' }} -- all -k "fips_on"
+
+    - name: View trace log
+      if: always()
+      run: cat "${{ env.TRACE_CAPTURE_LOG }}"
+
+    - name: Upload captured traces
+      if: always()
+      uses: actions/upload-artifact@v4
+      with:
+        name: "traces-${{ inputs.target || 'tls' }}"
+        path: "${{ env.TRACE_CAPTURE_FILE }}"
+
+    - name: Finalize test results
+      if: always()
+      run: |-
+        mkdir -p "${{ env.TEST_RESULTS_DIR }}"
+        if [[ -d ${{ inputs.target || 'tls' }}/.junit ]]; then
+          mv ${{ inputs.target || 'tls' }}/.junit/*.xml "${{ env.TEST_RESULTS_DIR }}"
+        fi
+
+    - name: Upload test results
+      if: always()
+      uses: actions/upload-artifact@v4
+      with:
+        name: "test-results-${{ inputs.target || 'tls' }}"
+        path: "${{ env.TEST_RESULTS_BASE_DIR }}"
+
+    - name: Upload coverage data
+      if: >
+        !github.event.repository.private &&
+        always()
+      uses: codecov/codecov-action@v4
+      with:
+        token: ${{ secrets.CODECOV_TOKEN }}
+        files: "${{ inputs.target || 'tls' }}/coverage.xml"
+        flags: "${{ inputs.target || 'tls' }}"
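The FIPS workflow splits the suite with pytest selection: the GOFIPS=0 step collects tests via -m "fips_off", the GOFIPS=1 step collects tests whose names match -k "fips_on", and the regular lanes in test-target.yml (next diff) drop anything matching "fips". A minimal sketch of a compatible test module, assuming the e2e marker and dd_agent_check fixture from datadog_checks_dev; the test names, the instance fixture, and the fips_off marker registration are illustrative, not part of this PR:

    # tests/test_fips_e2e.py -- hypothetical sketch
    import pytest

    @pytest.mark.e2e
    @pytest.mark.fips_off  # register the marker (e.g. in pyproject.toml under
                           # [tool.pytest.ini_options]) so -m "fips_off" selects it
    def test_connect_fips_off(dd_agent_check, instance):
        # Runs in the GOFIPS=0 step; excluded from regular CI by -k "not fips"
        # because "fips" appears in the test name.
        aggregator = dd_agent_check(instance)
        aggregator.assert_service_check('tls.can_connect')

    @pytest.mark.e2e
    def test_connect_fips_on(dd_agent_check, instance):
        # Runs in the GOFIPS=1 step; "fips_on" in the name satisfies -k "fips_on".
        aggregator = dd_agent_check(instance)
        aggregator.assert_service_check('tls.can_connect')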
"test-results-${{ inputs.target || 'tls' }}" + path: "${{ env.TEST_RESULTS_BASE_DIR }}" + + - name: Upload coverage data + if: > + !github.event.repository.private && + always() + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: "${{ inputs.target || 'tls' }}/coverage.xml" + flags: "${{ inputs.target || 'tls' }}" diff --git a/.github/workflows/test-target.yml b/.github/workflows/test-target.yml index 489620e36dd94..b56355a44ea66 100644 --- a/.github/workflows/test-target.yml +++ b/.github/workflows/test-target.yml @@ -225,7 +225,7 @@ jobs: run: | if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then set +e # Disable immediate exit - ddev test --cov --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }} + ddev test --cov --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }} -- '-k "not fips"' exit_code=$? if [ $exit_code -eq 5 ]; then # Flaky test count can be zero, this is done to avoid pipeline failure @@ -235,7 +235,7 @@ jobs: exit $exit_code fi else - ddev test --cov --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }} + ddev test --cov --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0} -k "not fips"', inputs.pytest-args) || '-- -k "not fips"' }} fi - name: Run Unit & Integration tests with minimum version of base package @@ -243,7 +243,7 @@ jobs: run: | if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then set +e # Disable immediate exit - ddev test --compat --recreate --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }} + ddev test --compat --recreate --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }} -k "not fips" exit_code=$? if [ $exit_code -eq 5 ]; then # Flaky test count can be zero, this is done to avoid pipeline failure @@ -253,7 +253,7 @@ jobs: exit $exit_code fi else - ddev test --compat --recreate --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }} + ddev test --compat --recreate --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0} -k "not fips"', inputs.pytest-args) || '-- -k "not fips"' }} fi - name: Run E2E tests with latest base package @@ -270,7 +270,7 @@ jobs: # by default if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then set +e # Disable immediate exit - ddev env test --base --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} + ddev env test --base --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} -k "not fips" exit_code=$? if [ $exit_code -eq 5 ]; then # Flaky test count can be zero, this is done to avoid pipeline failure @@ -281,7 +281,7 @@ jobs: fi elif [ '${{ inputs.pytest-args }}' = '-m "not flaky"' ]; then set +e # Disable immediate exit - ddev env test --base --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} + ddev env test --base --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} -k "not fips" exit_code=$? 
           if [ $exit_code -eq 5 ]; then
             # Flaky test count can be zero, this is done to avoid pipeline failure
@@ -291,7 +291,7 @@ jobs:
             exit $exit_code
           fi
         else
-          ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+          ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0} -k "not fips"', inputs.pytest-args) || '-- all -k "not fips"' }}
         fi

     - name: Run E2E tests
@@ -308,7 +308,7 @@ jobs:
         # by default
         if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then
           set +e # Disable immediate exit
-          ddev env test --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }}
+          ddev env test --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} -k "not fips"
           exit_code=$?
           if [ $exit_code -eq 5 ]; then
             # Flaky test count can be zero, this is done to avoid pipeline failure
@@ -319,7 +319,7 @@ jobs:
           fi
         elif [ '${{ inputs.pytest-args }}' = '-m "not flaky"' ]; then
           set +e # Disable immediate exit
-          ddev env test --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }}
+          ddev env test --new-env --junit ${{ inputs.target }} -- all ${{ inputs.pytest-args }} -k "not fips"
           exit_code=$?
           if [ $exit_code -eq 5 ]; then
             # Flaky test count can be zero, this is done to avoid pipeline failure
@@ -329,7 +329,7 @@ jobs:
             exit $exit_code
           fi
         else
-          ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+          ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0} -k "not fips"', inputs.pytest-args) || '-- all -k "not fips"' }}
         fi

     - name: Run benchmarks
@@ -355,7 +355,7 @@ jobs:
         # by default
         if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then
           set +e # Disable immediate exit
-          ddev env test --base --new-env --junit ${{ inputs.target }}:latest -- all ${{ inputs.pytest-args }}
+          ddev env test --base --new-env --junit ${{ inputs.target }}:latest -- all ${{ inputs.pytest-args }} -k "not fips"
           exit_code=$?
           if [ $exit_code -eq 5 ]; then
             # Flaky test count can be zero, this is done to avoid pipeline failure
@@ -376,7 +376,7 @@ jobs:
             exit $exit_code
           fi
         else
-          ddev env test --base --new-env --junit ${{ inputs.target }}:latest ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+          ddev env test --base --new-env --junit ${{ inputs.target }}:latest ${{ inputs.pytest-args != '' && format('-- all {0} -k "not fips"', inputs.pytest-args) || '-- all -k "not fips"' }}
         fi

     - name: View trace log
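Every ddev test / ddev env test call above now appends -k "not fips", so the ordinary CI lanes deselect FIPS cases instead of running them twice; pytest evaluates the -k expression against each collected test's name (and its other keywords). A quick illustration with hypothetical test names (not from this PR):

    # test_selection_demo.py -- hypothetical module
    # `pytest -k "not fips" test_selection_demo.py` keeps only
    # test_plain_connect; the other two are deselected because the
    # substring "fips" appears in their names.

    def test_plain_connect():
        assert True

    def test_handshake_fips_off():
        assert True

    def test_handshake_fips_on():
        assert True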
diff --git a/.in-toto/tag.ec45eb9d.link b/.in-toto/tag.ec45eb9d.link
index b5c31c81fd0f9..344bc830f1bd5 100644
--- a/.in-toto/tag.ec45eb9d.link
+++ b/.in-toto/tag.ec45eb9d.link
@@ -1 +1 @@
-{"signatures":[{"keyid":"ec45eb9dac9d6ba1bcd131314583c67851bebf0d","other_headers":"04000108001d162104ec45eb9dac9d6ba1bcd131314583c67851bebf0d05026734b1a9","signature":"7a83646a1c81788036cc4fa2520792c3c51ea9271e224f7c665954599d4cdae1d6c55d607bfd8e19cb8c82d4cab95bfc9278879de8e3ac1f003a96e61d5a22d3205e387302f39c40277f380cf1121984d324e9ffac2b2bfdee62ecf3b9ddbb75d9ad6be928da8c039af4e71afa8cac2e51e31e3e4c7de0fad940ba871cd4325ce193d610b2c3d40f55bf51415d9f09a5d88f283241ebf287e3bf23bd8b0d3c2e95364b15b373325faa9fd52711e0265df0f1a75d15b1febbe86b34b5cdf78b61979c96c6bb056096dbfe9276dfd40075fbed1a76cfe39ecc9e7206e7bfeb7e245febff612ae1128fa120f073b4331e440e86d49c7d95574ac286411a0955b6b2e67c6f27acc0dd229e4c3b8ad9246f745e291cb6f30686c6dfa6e6c8898a9702cbaf35b73bc561e6eb8a356d9fc198d010efadb3e7625240adee00c9f1eae08ec64d6c19a9c63560c38a29907547c18fc2daa71bf893c4f6cacc808d5158d4df95ce50170d7cdf5b9b879bd06bafd916b1a24a38b7bd9f2d3befd6ac94472f3bd404947a247a063c26857c94408c953812792177646bd16c5da3fd83320fc37929b8acf29eb3c833238d082769bcd722350bbb3f932e58c637996f746d69e7703df1966d113af43085df2421a7eae861d98bdbad2e62dd71fd1a5535cb487cdec24e690f88ff29362d6fceb0c0824e59e520312765ed2f5c6a0f7010356f7e83"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_dev/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_dev/datadog_checks/dev/__about__.py":{"sha256":"e3b4812d911cac31665dc8233eb63e2f5d85c38d8a7d6bc4179325e3fe060ef8"},"datadog_checks_dev/datadog_checks/dev/__init__.py":{"sha256":"a719d056d27f47df5fcd37b6f4d584d1e6a7cbccef33ae01da2520b19c7dd722"},"datadog_checks_dev/datadog_checks/dev/__main__.py":{"sha256":"0b1c4bcaa055bf74aabb2b821b4b1c26b0833439ab59d91193feb086d1627ac2"},"datadog_checks_dev/datadog_checks/dev/_env.py":{"sha256":"278a8a9d9890480717189a046f00ca9b95bb5972436ebc6ba42150d617463c8d"},"datadog_checks_dev/datadog_checks/dev/ci.py":{"sha256":"4f936a4d0e8ba8fb56731022100507fe6658f2daed5b31a0e9ea0070e84c7668"},"datadog_checks_dev/datadog_checks/dev/conditions.py":{"sha256":"aecd4c17f6dd95a4102d18193912178f7cc6bf71c7b7bbfb58a675403984120d"},"datadog_checks_dev/datadog_checks/dev/docker.py":{"sha256":"0fe8ddb666133783f9b20d2805d3109e7a9c4b64eaaf758ea90c512212695477"},"datadog_checks_dev/datadog_checks/dev/env.py":{"sha256":"8d90808a53ac87a0db9bad4c4babfd003ad116a84af507262f57ccfb7b694053"},"datadog_checks_dev/datadog_checks/dev/errors.py":{"sha256":"b048cb0dab6b7cab91e38ae0de73b8e167a62c03b940a7e679040b5a3eff8605"},"datadog_checks_dev/datadog_checks/dev/fs.py":{"sha256":"7fa0fe8d32e4a55354007212dd57500cc8f6a19aa8129962e846d66ea276975f"},"datadog_checks_dev/datadog_checks/dev/http.py":{"sha256":"7c7191d5d772f02a808f7c873da62d61bd47ca0efe292f9a757061ee6df6945e"},"datadog_check
s_dev/datadog_checks/dev/jmx.py":{"sha256":"0c9fe850e36987cfc91711f37e26ca9e094880855293f26c58ae368d375976a4"},"datadog_checks_dev/datadog_checks/dev/kind.py":{"sha256":"91d8c59362af0acbf6f48f7d455e72e1b7257d25a89a5bd4a19e24047712487b"},"datadog_checks_dev/datadog_checks/dev/kube_port_forward.py":{"sha256":"39690a158ba1f82521e54a438a1502090a1a6d74539e21dbbab2bf6c2c02877f"},"datadog_checks_dev/datadog_checks/dev/plugin/__init__.py":{"sha256":"4c2d2aee209b36a7188df5396b304da429e2f0b01060e7d8e8500313749910f0"},"datadog_checks_dev/datadog_checks/dev/plugin/pytest.py":{"sha256":"b2f31f813309a8e04e2415a8577aa1aa5864d740878db22bbf4fd80644355b72"},"datadog_checks_dev/datadog_checks/dev/spec.py":{"sha256":"145c7b93d41da315b567f8dba92c07069ee0ddd67ef508d8b4540ca997bd3911"},"datadog_checks_dev/datadog_checks/dev/ssh_tunnel.py":{"sha256":"131c88d649ac11f70e81be3b047f56e816284bd5a2210ac14fb830db53baaa6c"},"datadog_checks_dev/datadog_checks/dev/structures.py":{"sha256":"0c660cab8cb5528ed495e9c213fca923648e6d22620f21fd94bfd89ce3f31004"},"datadog_checks_dev/datadog_checks/dev/subprocess.py":{"sha256":"3c1bd68f56d571b663ee79a3b6866696c49ebd9aba880202615ef6fd77b2d872"},"datadog_checks_dev/datadog_checks/dev/terraform.py":{"sha256":"49e80a404fb068f3fb9447c93735ed75238b134300136201a298cafd940b6c64"},"datadog_checks_dev/datadog_checks/dev/testing.py":{"sha256":"e9591c42cec41120723945352d5d00e356d068e71fd8e68c224b3cd8adcebd39"},"datadog_checks_dev/datadog_checks/dev/tooling/__init__.py":{"sha256":"4c2d2aee209b36a7188df5396b304da429e2f0b01060e7d8e8500313749910f0"},"datadog_checks_dev/datadog_checks/dev/tooling/__main__.py":{"sha256":"60b67815a138da81e4984c304402f61c4ed7282374e9e8cdfe8fca21200f57af"},"datadog_checks_dev/datadog_checks/dev/tooling/catalog_const.py":{"sha256":"22d1c5932d586df0e894eaf22b496871148cae968086c9239523417000c7e176"},"datadog_checks_dev/datadog_checks/dev/tooling/clean.py":{"sha256":"8411d7605bc7e0bdf56560e7cdf50b21132c1cb3d1d5cf5d4af47b5fb89267d7"},"datadog_checks_dev/datadog_checks/dev/tooling/cli.py":{"sha256":"333506143c67d50e491acdf6b666a768d794900b891b54d01972e1a546beea29"},"datadog_checks_dev/datadog_checks/dev/tooling/codeowners.py":{"sha256":"4395fc4b7cf59c749a85d567e53537c35283b2fb698a432ddfce0396bc3a4b09"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py":{"sha256":"cd559ab9d0222c6566cbc40acec1d07e34b32d1e24f9a7a5c3b1315cda0d74d6"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/__init__.py":{"sha256":"21f07a02f2dab8873d774e0c096945a5a871c9a74d898860c984a11528a1f13b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/requirements.py":{"sha256":"8aa8d2feac7f3ec9ac54d72508c32fb7c52e6d26b2f14173a14da90bb3cfe893"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/__init__.py":{"sha256":"191a8d4d6140548f010dab64e178201c27cdce9ad85a5f76c1a1baba9151130c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/setup.py":{"sha256":"f645e34d547fe5935e7c0d3c19163fed712dd53e769bee71587c4d969bd2cc32"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/clean.py":{"sha256":"cd7fa0092ba01bc741a64dbb602ad956334b775338cc3f9ce9933dd95eac9ae3"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/config.py":{"sha256":"4633f9e5a47f51725add1440777526561bf165192fd1fd3d14315b78f6bf4f7b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/console.py":{"sha256":"1366bc7a68a374c96246a2f8dab219ebad95c2b1239a691af721f95ebc737537"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py":{"sha256":"f5e1024ba1840f7b93840dad77ef4d8
e009a38d13f37e484b915bb6c5fc6dbca"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/__init__.py":{"sha256":"7f89a95c9b5565479c9ecce8e2bc99edea02448afaaa73b216448025c9459078"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/catalog.py":{"sha256":"77e30473cabd41c81d20d382552f9ae086d5ef07ed01f119f8e48fd257a9cff9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/changes.py":{"sha256":"e4eb0e99c569356e10c493112f0d4b562a120dbf7f79628f2579ea9359696a26"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/create_commits.py":{"sha256":"f015f1ccddd7e0e86362c8d73dea3f9abd632ce1896dbdd6202b17d9828dde3f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/dashboard.py":{"sha256":"3998486b9c1d29fa7497347cf00e6892fa80212362fd7970d6e14e96d8a78dc9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/jmx.py":{"sha256":"689144e492c269efa8d99ec4cd8a8597ef40e58ec512f4900e55c44451dea4a0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/manifest.py":{"sha256":"81e4925ff5de3519ff16b841f3a946f399d929693ccb4174bf5e77a9385d7eb8"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/prometheus.py":{"sha256":"77da687c3ed4f735c0fb585c404a8512707ae497afbca3a172d3db02cdc1380c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/__init__.py":{"sha256":"c28c912cf99522f2fefd97444a9ab5117881e76db6a0d095afafcae0b8c182bc"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/github_user.py":{"sha256":"35148be0ada9603d66ac6fc333dfe938125f0844e52bd91b6153a42a4d9ecbf0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/metrics2md.py":{"sha256":"e1ee509547503f2939e2176df637d42285d94f361e7d45c5b44d6bca925aac91"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/remove_labels.py":{"sha256":"ed595382817323bc09722c0bf39b5f8a96454ca0f848e35ad110a5b6a9eb92e7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/__init__.py":{"sha256":"b05b300236e8c7852bd02fae245ce8c905e4d7bcb2b0d6543366b030f460d97d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/constants.py":{"sha256":"91bc112010f6f9fd4e040e89421a2b97fb07060fc402b21bbac3a3d00fce38ae"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/generate_profile.py":{"sha256":"97babd0c29a68d5a12b8785430ffdd8ffe3783399ebfe9f13f7ea0cbfe33a2b6"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/generate_traps_db.py":{"sha256":"3608038e42af10351a03a3f2f76084861f7362075b82e945ec75f5b70e5c32e9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py":{"sha256":"0ec4822a35d4f8011ca9c11f06ea04e57f74aa349f9f7788384e14f6ff8eb0df"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_mib_filenames.py":{"sha256":"235f3d225571a9957493698dae41149eeaeb0714df31fd7707161b1d8da5f503"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py":{"sha256":"8562cefa771a52f86b4b90458fa0d0019997ccb4a9da85366e4de6201ba0cbbe"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/__init__.py":{"sha256":"083f2ce9f2d3e8104a83c7c94be9ab6562cb5c9294581b913992d5441e07b82a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/profile_schema.json":{"sha256":"2bfde4d49c86e8c1906e2182f3a0be827f4f90b6cf42a156fe30c0c102735ae0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py":{"sha256":"16659cd61d8dfa4fd09179c2bf12ecfa0e01f9d6dfdb58ec6030060
655fd8018"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py":{"sha256":"f4f888a47370619a57a6eabc15162fc82644736bf8047db21e3a289d13b2cb60"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/windows/__init__.py":{"sha256":"42adde34af2d619ba62aa4a20b345a413f66f6799b9cffa595a1d35ee77f84ec"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/windows/pdh.py":{"sha256":"612b974d4b222f26f4b21665637ef0fefa0855f78bb3ff8ea5f08093249f0f72"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/__init__.py":{"sha256":"2f6c05d3a9dfea59350a775201a1f57de5f71c6cc5bcfd412a523481c15f29b9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/build.py":{"sha256":"0e4f2bab40cb30f9c02d755bfa2c6943dce668e9291e9afa59b24cc05be1d235"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/changelog.py":{"sha256":"1d10448cbc88e73818e9a4d8691d72bd57e520e61c0786c83203855af3e053a2"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/make.py":{"sha256":"86ef70d6de290931a6aba63179bed92354eccd06970ef6708acc76b048160dd9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/__init__.py":{"sha256":"76701cad3b42c9ad904ab8f92caeb26818cad3599818e45eb672e6147d6c6a1f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/changes.py":{"sha256":"6005e6bb08fc14d0269c643df4d5d394c0425663b566983ba1aba792a121c671"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/ready.py":{"sha256":"bf6203afe8ad5a62f321d597a4b037bcfd20c56d9c0cc7a8811ee803d492e1f7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/__init__.py":{"sha256":"5b5187c36eb3953d3ef79a67ef28677078139b5f2849496f091176f682578a78"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/common.py":{"sha256":"ea365d818ca5d160dff43a18fc5dec1c0ffeb4db8375784357cf56b0d075409e"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py":{"sha256":"78a1ae027e8816a2f152f6054df1c6595524dc34c47274bcdc6b38a88983803f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/tag.py":{"sha256":"fdf6a3498acaee4b9ff4ba5b96cba537064a91c33f8e4890f36ce6968f0bb611"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/upload.py":{"sha256":"32ba19a345693a5224f0b47bb428595dc222c6f2e80ee5ce55ca570a716e2717"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/run.py":{"sha256":"4562a57b1fec3719fdcd402b4e6fb60727997b3d65738dd51f8b058a84447f58"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py":{"sha256":"7665c44e173fb39c57f628642a1602b75f6ee4be33ee3b7b6f69fd1b514baaae"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/__init__.py":{"sha256":"5fc69ba42f0b93d144d18b24c4a764a3ffbff6168f04db6dd5b2d4746490b670"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/agent_reqs.py":{"sha256":"326b3b8ff592970607afff39dc789ce5a8e87cce401c93466c8d3d182cd1ab1b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/agent_signature.py":{"sha256":"776028eca5f17fd76bddcefece677164e80a6f46c065f47adf746b8357577750"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/all_validations.py":{"sha256":"e5bec12b3870c16aaf934c2e5d915e9d00ed6d8bbea70e90d7c0050e8ec36a2a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/codeowners.py":{"sha256":"b53816da23c5aa75b0847af340653e0ac508293775b3de8622f07af94f431252"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/config.py":{"sha256":"be832f8e8bbf1ec6f4
16eb87a4dcda79acdfa0a86e6f34a29c00c9428bd2e818"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/dashboards.py":{"sha256":"7dec95d45836e1ade04b4b8861fff9a788c68ddbcaa9731ba88fb419066f9b00"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/dep.py":{"sha256":"5b1cd5770548b9033f0495416302dc1354605599df5b215ce22821f6c1077180"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/eula.py":{"sha256":"be7a782a6ed082cfa3b392749e22c91fe5dc31e47a3df9e3969009d2b2f02a6f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/imports.py":{"sha256":"4a57ae1e1c2aa89409653524fffb14cf560c5fc007ba7e0adedb1bbe7c9e6182"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/integration_style.py":{"sha256":"3361d7f7cc49c7071b79ea9584ae1328acf8a87e7e88bb2162fe274dc1fabb0a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/jmx_metrics.py":{"sha256":"191262b6666e4a8e353fa9becbead4c37fe651455be78b16ae18cd2f85478901"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/license_headers.py":{"sha256":"2f106f2f63ec01f8a57324311ca16d06fbf8440fe4ac6ae67b44662da6e162ce"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/licenses.py":{"sha256":"901dd1700a55ee95d714643c7465da18fc52dc740ff07003b483621131496d08"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/models.py":{"sha256":"c1f28db5d42a284b6a308fca583104a7742128d44492216155f5da94e67d167b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/package.py":{"sha256":"489624b1a7c1c964f69068c0e93e876f96465d5be97fcc6353f16ab86609239f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/readmes.py":{"sha256":"063c457c317cd79941df231535d63246e9dbfe6f9e910f40e688897359b0928c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/saved_views.py":{"sha256":"699491a6f758c8bd051063c4c2edd096f5b2eadb7f10824664caffa86b7e284a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py":{"sha256":"5527f8ab8e94b02c3d87a48e432097f6e3a4ee1e417db244e3d72b8be6db61f0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/typos.py":{"sha256":"71b1dcce3e92412249556efc50b78c5ae87d9f0b73f1d0f06c9375ac8fe3c8ab"},"datadog_checks_dev/datadog_checks/dev/tooling/config.py":{"sha256":"1522dd49cdd49cd808fe18fc55a94abbf3c57eca5012bae93c0e6cd381dc5d77"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py":{"sha256":"4eb448f403bee6396bc17bce6053897ef5a69bdcdff6ccfefeeb9519c5c8a14d"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py":{"sha256":"2e88a21bcf38d3357aa68ab3a3bf688eddc796960be25c3b7952bde36fbb0697"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator.py":{"sha256":"f2749d57ab97175ecf9626864e3414e361ab2332802d742f60d97c26a9828a7a"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator_errors.py":{"sha256":"e3920ed0847a7537d45fddb5398e657c1742cee9c27cfaf453add2fc795a63fd"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/__init__.py":{"sha256":"7f40321b073a61d5d03139ebf0167051f11838ca7b6242332dabcbbb9bbe325e"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/constants.py":{"sha256":"69dc667a35940e6c488ed9f0197333b6b3e2765bdd1ca5a9bcf23266977a111d"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/__init__.py"
:{"sha256":"3fad58a28211e9fb13fcab3d29843e726d800c1c2202b87295eb423cfd206046"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/example.py":{"sha256":"86ff8fe1ecd7d0a8a0fbb3d053175ebadc713636fbbbcc528cfbfd8a8d6f0cd5"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/model/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/model/model_consumer.py":{"sha256":"af3adf329489175f6ab6f22710c60b4f2a0dd650250b3653ed032bb63d250696"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/model/model_file.py":{"sha256":"6dbb93d2c46ef27829eab53baa6adb87c9ae3a4e2dfe02733c45e2b1c7fbea45"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/model/model_info.py":{"sha256":"b90c7f6f112cc8c86813bea24d25fcc37bc11255cfb7695364cb87fecda0334b"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/openapi_document.py":{"sha256":"15c28390df75afa8eac070ac96fb73f443a1aedc2828119dd4d05546205906d8"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/core.py":{"sha256":"2580c9a8d48a6bbf00f2413ad65e689fbdc772f9a94ad1ab33ef7960d08d8da6"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/spec.py":{"sha256":"d166dd9667efca90028873ec6639732ec13da76829a795a191d922a611534ff7"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/template.py":{"sha256":"1b47dad5b7f1df41059ec35db383e8a844c295cf46327389fa478433f8b1f624"},"datadog_checks_dev/datadog_checks/dev/tooling/constants.py":{"sha256":"9be45796438d14d98c0c4b95e61616872cf18121f87f5a3c5a14e6073e67a8b8"},"datadog_checks_dev/datadog_checks/dev/tooling/create.py":{"sha256":"55d851ea04f86c8281b4a5e7c94c619c8d57e355542297bc5ce7fac1aa8606e8"},"datadog_checks_dev/datadog_checks/dev/tooling/datastructures.py":{"sha256":"58411715d64348e60f964fb58a55481e58a0859a33a75de5eaa86b64ade0bf58"},"datadog_checks_dev/datadog_checks/dev/tooling/dependencies.py":{"sha256":"5c93c0c716f8d11730ae12b87c8f59b945efbc260c7fadb352d649c5b3ab3f37"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/__init__.py":{"sha256":"041770d4abd7a492615e0a23c062ffd3748f7f26678f2abc42450caf6ba7cd0a"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/agent.py":{"sha256":"7039c9dc9f04beb95bde3a244a6747fa5a5ce310495f776def606a021e38b017"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/config.py":{"sha256":"9e0903ce05e4352477233a49dd40d40ff66801fb1acb1c0fdc32b06509341a15"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/core.py":{"sha256":"1f810b8a4b0e9f6b6fb7811a2ee575a499cc2cba2563901d80ff2fdb50c47d81"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py":{"sha256":"2b0b70865768b3a7f636aa69470c3caac2c23a80c4c9ebf598799a6753eb2173"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/format.py":{"sha256":"70de3b669c59f3e52961e7251cc110e5e15b88f81a35b95479afa24f512acc18"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py":{"sha256":"13050ed07abb86cf80db545104a3e23c3bb254b5f8ee6498643f27d74718952a"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/platform.py":{"sha256":"dfb3a670a0cc6e64c496f7cd6905650347d80b869630ad7aedab0a4dbbfc5f11"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/run.py":{"sha256":"fb2ce093a1f2e473e000623f4cc2bc51b8dd95f6407c8ecc0bb59c20503e7ea3"},"datadog_checks_dev/datadog_checks/dev/tooling/git.py":{"sha256":"8a430108786d2659caf8e5e22fb3f2f5dc3e41d2ea4461a0dcee79a817cfe93e"},"datadog_checks_dev/datadog_checks/dev/tooling/github.py":{"sha2
56":"213601f1ca2f8ef9e03ff3d5221de0b610218d65117b6f5cc8661d93b80ca398"},"datadog_checks_dev/datadog_checks/dev/tooling/license_headers.py":{"sha256":"90af3fc8a5118ec0f961ecbd2cb6d250bce0498a07f25d65dcb5803d99e6c5a5"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_utils.py":{"sha256":"7ad2c25ae433b1ce9286ceaa545fae7a199309f6d71a5b409a0083f349eafafc"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/__init__.py":{"sha256":"afb09b643841291ec16223b06f073404a4b1ecf9d1432719f319a75153528b53"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/__init__.py":{"sha256":"72811b8621e09b2c2584eef89feee2146cd5cb8e0e775c80c1af031152ebeba4"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py":{"sha256":"ae4bdd4cbb3fa842dac3bcf95cb4fc05755a3422d36645434c5a5f5724770d17"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/constants.py":{"sha256":"8ddfd4b9bf11df5930e9a8eb343d48cc8e92be019c2baf613ce4dec3860ad9cf"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v2/__init__.py":{"sha256":"72811b8621e09b2c2584eef89feee2146cd5cb8e0e775c80c1af031152ebeba4"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v2/migration.py":{"sha256":"83c6442192b8109b46b8fcdd66728a6a3f02ada36441cdbeb6bd6d41eff113b9"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v2/validator.py":{"sha256":"c4ec194d39fe469a1663c98e7c75b98b913de36ee426d11c90e7a774ecff92a4"},"datadog_checks_dev/datadog_checks/dev/tooling/release.py":{"sha256":"596c82ede4a4ff577e48b93371c8adf763c0eeff4af81fcd61d3a15676190e53"},"datadog_checks_dev/datadog_checks/dev/tooling/signing.py":{"sha256":"8c70be46d3e4a14191abe91bbcf080cc2066f8117f1c1b78fff5031053b72c85"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/ad_identifiers.yaml":{"sha256":"debad2c0564fceaa7bc21481ce43ba4d1309a769ac8fb67e65c707a6b9e49f42"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/common/perf_counters.yaml":{"sha256":"9001b82611b1107c4014161991967e21eb725b251abcb50965c80ca9255b313c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config.yaml":{"sha256":"0b03d07105545af963c8e1d332236612961de12ebc473a917dbfc29d8ec3198b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/db.yaml":{"sha256":"341c52cdd3d44c7540107ee791b6cd0be7f225143a57c930cf33ba87b7e24d1f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/default.yaml":{"sha256":"d1d899a235b713e1dc277e4e149f03b9caaf3a657c2900776d02f1a5ca54b5c6"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/http.yaml":{"sha256":"b0d3492b6ed3eafa40a5cf23bd9e6a8e4074960e060f7733da4f11b0bc75894f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/jmx.yaml":{"sha256":"56996f66f2ec8ecbaef25cadad04743760f3d36711bdfbff337f61c4c4616e4f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics.yaml":{"sha256":"5bd455963b3ba5b4e8b1cd835630d06c91231aaa5c88bbba0870c35917ce6df4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics_legacy.yaml":{"sha256":"5bd455963b3ba5b4e8b1cd835630d06c91231aaa5c88bbba0870c35917ce6df4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics_legacy_base":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/too
ling/templates/configuration/init_config/perf_counters.yaml":{"sha256":"b72ab4bf57cda61f33a759905fa6070482e21f174ffd7ca304c2b781d2458ee5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/service.yaml":{"sha256":"9be396dbf8f78327572d7ee1c391f2145cf9140c8241b2788e15278b8208425c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/tags.yaml":{"sha256":"70797d15cfd8c7310cbe428ea21b3717045fc933184431736ab8bcdf4e4773af"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances.yaml":{"sha256":"bab548f2d4f06a84e38c03e5ba71df6b5b4727a9c6fd9b10744d62b1324a3ea9"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/all_integrations.yaml":{"sha256":"de9afd1055fb7e6402e226f9ab1bf51fb3a78d3deeb7eb2dd6e17af4dc3bc78e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/db.yaml":{"sha256":"a3d238a1d2d313ccc94f8ab95d077312103c89e369b0d444bb141ae97815aa66"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/default.yaml":{"sha256":"ad2523157985ba4af6b02e3b040dac9fc6e1e9279e74d33593a84632296e149a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/global.yaml":{"sha256":"f2372b75408c2516c5d2cf805e9d66cbbba83b62774ebf95bc2a4d6459708413"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/http.yaml":{"sha256":"6ae792c7aa0890ab7dbcd38781808a34871d72b83f3ce9de3d3db831c6dccc8a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/jmx.yaml":{"sha256":"6046c9eb0d2e7063ebdbc93012e69ba3c698ad5092b1c1d4b58217bd4539ea45"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics.yaml":{"sha256":"d92ef2fb83ea5073d463a3aa410b8549edb4ddbb2e5ea423c36f2cd4135eaf22"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics_legacy.yaml":{"sha256":"f0cc0ba9b5eac9450b9a3f5c96c480dc096badf94aab0bfbbbede5cf5c6f9229"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics_legacy_base.yaml":{"sha256":"7b67f8b59ca7f85488a838f0beed74831ee30f1847fad86be9e8ffde3bb7136e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/pdh_legacy.yaml":{"sha256":"3ea8930990f557d90527ef9a41b7f5c35ad203638cbeb67ef5e2fdc2aee25912"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/perf_counters.yaml":{"sha256":"20666e2e25b311cc7c6c65355a918ff968c4e791506d74f625ac9fcfb1c0522c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/service.yaml":{"sha256":"a2a42e1b8d014caca43db48877828a0f889f2cc16847068862e0f4bd2decb033"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/tags.yaml":{"sha256":"5ae1915ed8f262a3c0db6f554f9a7f5daf52b5705cb50b3c4c2e319692a4925b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/tls.yaml":{"sha256":"98f252624fb180d0087aa60760ef892670ee3fe53066f3ff1e472637bb96c3d4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/logs.yaml":{"sha256":"1182fc46b7d01535900e3b7373eb99b5d51d899fc6a5b827ee81db8c8e54bb8d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/README.md":{"sha256":"05ae39533fd5b3774d9b63d34d96e5bed68d40a66f3868ee17ccde977916c640"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/CHANGELOG.md":{"sha256":"cd08a7bcb971c304
6e8cf9217f98c19c86e38032c558d8b8ec786c9536f9e9c4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md":{"sha256":"981d74dd2b21b95dd207b2caae08d1dcc79d4cd8bfa0438e4c10f0ad17aeda63"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/configuration/spec.yaml":{"sha256":"2ace40aafc7e0b9f6440d50101b8cecc32ba00d0bc9820e99a2e8632be05170d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"7d5a3f4ecb4ed7c6d6efd236fb0bd94f31b0772472d9db02d4a063e759138222"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/changelog.d/1.added":{"sha256":"38c1fb60f69cf5b525500782b5ac0eaf0b4b8f0fcf4fa6f8cde96d83c316e07c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"49c83d52895bf69c2ffbf1a32d3c3491961018693f68413bf494c63bbbd59c17"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"7afb749ce0829e4abed181098d56b8da0a07fbe5444ee8ed7037c04f04feca23"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/check.py":{"sha256":"defbe9c17509f34ca10ce5c16f501c17a71208475b2af6d9302f3cbd608bbcb7"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py":{"sha256":"a380172194927e06060e6560bde84d4316decc8a7acb39fa4b909cf952e06852"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py":{"sha256":"9072c1a8aa9ba1ac06242d21f76f4aac757d746978d1d49a9a440d45eda64103"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py":{"sha256":"13bd3f64d4e9f48771cc84fd9b9ece5d9f2eff5353b34900587381e81d1ab435"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py":{"sha256":"5c43d21e0178045706b3de56810b5be58a45ea3b65bedfc3beed3800819bb278"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py":{"sha256":"ff8691fdd4c4bc21e4feeab75b714040176265ffe25c6e056c521aec5c0299b5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"5e0de7f83fac0dba09f081681c16e127fb4be027f08eac6166a102268d26efad"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/hatch.toml":{"sha256":"fee3c389ba9967e7788481bfe72ae04b27079e59a9cd49de7b1411bb20b0d3d0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/
tooling/templates/integration/check/{check_name}/manifest.json":{"sha256":"7ae62710bdefefd6003b24c55620d5c1444f35bd508dbcb06c7a7e2b433a94d8"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/metadata.csv":{"sha256":"6ee51511155f603e08403c3df5713cf44d2437dbb27ec063dcf7bec61509694b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/pyproject.toml":{"sha256":"9c7ba0aeae0110de4c9f13f051d44d2280566484714388bbbca428e7221b5b52"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/__init__.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/conftest.py":{"sha256":"288ab38141b5fc11c7cbcce1d5850b5c85a461969a73a181bd9057a1b2919cf4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/test_unit.py":{"sha256":"9a3a0016b8305670142ae517f7a376f0181578249a6944925dd54b318e0fe347"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/assets/configuration/spec.yaml":{"sha256":"2ace40aafc7e0b9f6440d50101b8cecc32ba00d0bc9820e99a2e8632be05170d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/changelog.d/1.added":{"sha256":"38c1fb60f69cf5b525500782b5ac0eaf0b4b8f0fcf4fa6f8cde96d83c316e07c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"49c83d52895bf69c2ffbf1a32d3c3491961018693f68413bf494c63bbbd59c17"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"7afb749ce0829e4abed181098d56b8da0a07fbe5444ee8ed7037c04f04feca23"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/check.py":{"sha256":"defbe9c17509f34ca10ce5c16f501c17a71208475b2af6d9302f3cbd608bbcb7"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/config_models/__init__.py":{"sha256":"a380172194927e06060e6560bde84d4316decc8a7acb39fa4b909cf952e06852"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/config_models/defaults.py":{"sha256":"9072c1a8aa9ba1ac06242d21f76f4aac757d746978d1d49a9a440d45eda64103"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/config_models/instance.py":{"sha256":"13bd3f64d4e9f48771cc84fd9b9ece5d9f2eff5353b34900587381e81d1ab435"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/config_models/shared.py":{"sha256":"5c43d21e0178045706b3de56810b5be58a45ea3b65bedfc3beed3800819bb278"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/config_models/validators.py":{"sha256":"ff869
1fdd4c4bc21e4feeab75b714040176265ffe25c6e056c521aec5c0299b5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"1c57a5f19076030f8fbd6a57bf7b1073f61e70a261614bb8ca984a50c85f0383"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/hatch.toml":{"sha256":"45bcb4cd88549703f8dfedf4779ab85820267ba1eeef064a959ba7de31ce263c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/pyproject.toml":{"sha256":"37da6c7f7bc5c1bacafac16307a2276875546bd08f812b31ab3e3dd2001ebde0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/tests/__init__.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/tests/conftest.py":{"sha256":"288ab38141b5fc11c7cbcce1d5850b5c85a461969a73a181bd9057a1b2919cf4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check_only/{check_name}/tests/test_unit.py":{"sha256":"9a3a0016b8305670142ae517f7a376f0181578249a6944925dd54b318e0fe347"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/CHANGELOG.md":{"sha256":"cd08a7bcb971c3046e8cf9217f98c19c86e38032c558d8b8ec786c9536f9e9c4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md":{"sha256":"c5ef038605890d7ba9a6448c328506dfc9855a5516459f167614543146ae1f05"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/configuration/spec.yaml":{"sha256":"a7dd7aaa3a27ef53f689ea9e7bd135c97c7071896897a3fac925aec0866c7121"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/changelog.d/1.added":{"sha256":"38c1fb60f69cf5b525500782b5ac0eaf0b4b8f0fcf4fa6f8cde96d83c316e07c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"49c83d52895bf69c2ffbf1a32d3c3491961018693f68413bf494c63bbbd59c17"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"4a4f6dea91ffd477a50b09301f706382044f366efe4b3c41e86b039da7842e0a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/__init__.py":{"sha256":"c1e9bb781f20c5dfb6bdafdce5f13c46832e31a23f9fb0ccfec4a2ed83b97d04"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py":{"sha256":"96d1babba497ad906b332a2eabbe9178f5df0bc8651e63da2ed99000a0b17190"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py":{"sha256":"
279c7fc0f14b15f06a416338fc333f51ded1c10619d62dcda9c54c4747dc2e6a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py":{"sha256":"308f28e01943d2c15ddc9b9000695681b4664cc7d504c727b8a19c9d70165ea1"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/validators.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"479c850a145914012425917c62e26c75a4f5f95b99ef6ec0671183a9f821dc32"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/data/metrics.yaml":{"sha256":"529823f85918da2a317d966266cef9fca8c26ed81134ee1bd5b338f649723e83"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/hatch.toml":{"sha256":"fee3c389ba9967e7788481bfe72ae04b27079e59a9cd49de7b1411bb20b0d3d0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/manifest.json":{"sha256":"7d065580bb6f766a35dcabaa881e0d90e7f50ab6e662355178387082208389df"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/metadata.csv":{"sha256":"6ee51511155f603e08403c3df5713cf44d2437dbb27ec063dcf7bec61509694b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/pyproject.toml":{"sha256":"9c7ba0aeae0110de4c9f13f051d44d2280566484714388bbbca428e7221b5b52"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/__init__.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/common.py":{"sha256":"7a9fddcbc85c2d0db1d40e126f4b6b632dab83dfd1e771de303d58b9c7468491"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/conftest.py":{"sha256":"3d692efee3524ac1de5a7d116877b84f749674b5e95f0a5eff66d56d1687f860"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/metrics.py":{"sha256":"aa3800824032efd81a9ec139accffbdb326d26389d7b9617251b6738169274fd"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/test_e2e.py":{"sha256":"d52bb59384d89abfbd670e1c92149acbd1a6c21f9ce2209905c115ae253a7b00"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/README.md":{"sha256":"b6c3d311af66922a1c2313c5f3dbbcdf46fbbe18ec7eacc56f9d13c9528f4d06"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/CHANGELOG.md":{"sha256":"cd08a7bcb971c3046e8cf9217f98c19c86e38032c558d8b8ec786c9536f9e9c4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md":{"sha256":"c142025004870f1db648289bdab2976b6beb160f47c28d941a603d9fd6b3d1bd"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/configuration/spec.yaml":{"sha256":"e678062aba11f1b6a03e2e56a99277e6112c2e06f3dbce0a097cf5cf6b0abc59"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/dashboar
ds/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/changelog.d/1.added":{"sha256":"38c1fb60f69cf5b525500782b5ac0eaf0b4b8f0fcf4fa6f8cde96d83c316e07c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"49c83d52895bf69c2ffbf1a32d3c3491961018693f68413bf494c63bbbd59c17"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"4a4f6dea91ffd477a50b09301f706382044f366efe4b3c41e86b039da7842e0a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"4b30c4dc85fd460595889ebc1de22469b97b0ec0b43906d0d750e7af3cdd2aa3"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/manifest.json":{"sha256":"c35469fdaac57a792fd5a389651fccc437d5567288624d9f20bdd044256dcd13"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/metadata.csv":{"sha256":"6ee51511155f603e08403c3df5713cf44d2437dbb27ec063dcf7bec61509694b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/pyproject.toml":{"sha256":"68eb27b4fa410355ccd2e5e0ba9fcc1c404481941a5743cc04b9296072e3291e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/marketplace/IMAGES_README.md":{"sha256":"4eec6290559b12169131832997ddbd496cfe536708fde9b3ccd96879a7f298a1"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/marketplace/README.md":{"sha256":"d1afa7ae5595dd1c1ddf58528318ee82d06cca8a8e8b3c17ca236c2114f28c11"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/README.md":{"sha256":"06339a7e7c52457284412c4c280cf48c75f79afa83dbb2bb82756e7ae3561ca9"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/CHANGELOG.md":{"sha256":"a94d80ae88154fdce2f874cf02f2f1523ec9765020708c52c21c6182fc132f18"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/README.md":{"sha256":"c40ca8b60758af07470b173ec7780a98da754cceebcd650f56bda650e36eedf2"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"data
dog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/manifest.json":{"sha256":"0bc9b3a1e3cf2c92f7a3f4478da2a1ab719be26ff48f35c09b3ee27e28aba987"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/metrics_crawler/{check_name}/metadata.csv":{"sha256":"6ee51511155f603e08403c3df5713cf44d2437dbb27ec063dcf7bec61509694b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/CHANGELOG.md":{"sha256":"a94d80ae88154fdce2f874cf02f2f1523ec9765020708c52c21c6182fc132f18"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md":{"sha256":"586fc1f0daf83cb1e523a4cfe654f2cb5c1ea1e62a6cbb2217c94528a03d86ba"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/manifest.json":{"sha256":"9854a853bc050080597cf21059d8027c84849872d89bad091e7e33f7734ce064"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/README.md":{"sha256":"b87b3a5b2eea807d8d4ea36fc8248b103a88e9e9457b31f23f26c5abe2ffb368"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/CHANGELOG.md":{"sha256":"697b0270a62fc28c87e92cb9b0cda9b4ce448e919b3ea26d12d8331204e05f1b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md":{"sha256":"f55b1ae108249609e86e6e80ec6292f55e5c6cb471472f189cb6cdbaebb18b63"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"9c4f16ce9f369b6da6ce978b08d136e07e35827713259a6dcabea74408844d7d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/manifest.json":{"sha256":"beb03fd2d4d54dbe04f2d31cfcaa88e3e4c348ccd51534c115212d8b53e1e17c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/metadata.csv":{"sha256":"6ee51511155f603e08403c3df5713cf44d2437dbb27ec063dcf7bec61509694b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/terraform/gke_config.tf":{"sha256":"29e482b800d5a8dad7bc0fc3f42212e2467ceecdf8601d6aca3d6ad30b9c541c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/terraform/providers.tf":{"sha256":"b99c6d5e4e07431b992580cd437044dd3cb3371b33766cd24497a7cbf16f032f"},"datadog_checks_dev/datadog_checks/dev/tooling/testing.py":{"sha256":"54e43171062fafa83d40898e3338ed38310ca05f80389352b0354149edb45ec0"},"datadog_checks_dev/datadog_checks/dev/tooling/trello.py":{"sha256":"c6b90bbcee84b183d0201456d9f5e04edca0b7872b7a01c58b7d14ccb386e203"},"datadog_checks_dev/datadog_checks/dev/tooling/utils.py":{"sha256":"e2f7c9ce071f20d6ab4abf63040fc617efdc42212485d5642be5f27a92534452"},"datadog_checks_dev/datadog_checks/dev/utils.py":{"sha256":"a487bae64c97d4c8dc57a1537a0589bde70aae9811c24ca2ebedfefc8f6aa7c1"},"datadog_checks_dev/datadog_checks/dev/warn.py":{"sha256":"6445be66a38a2fa46d36dd72b42ad40f7f7
30353815b33e7a48f0566da7c61fd"},"datadog_checks_dev/pyproject.toml":{"sha256":"6bb55524326daa96e1c5eaaff94d53fd076656d8bf9af07da6e8db21eb2d9dd8"}}}}
\ No newline at end of file
+{"signatures":[{"keyid":"ec45eb9dac9d6ba1bcd131314583c67851bebf0d","other_headers":"04000108001d162104ec45eb9dac9d6ba1bcd131314583c67851bebf0d0502676e2f4f","signature":"27342749ea632a201ebc922642593569f2630efe06400243864c9e72b7cc25d834894c5bc72ec36fa5d5e931fac2503d0267bd7b8817cc94868c5ddece03a90d99d5003aa424a930080cf1a5fb91f040046faec8eb52d0ec170448c02f3dcdc88a6a5e71532bf98dbc42b91a85317ec429d20499eaac51a2dd3cbd2985f58a6456c836465262bdd1e7c79b096fa254f5f1434ade1a59c67f888a6ae9a2fa2c29a4403f3ef0d1b3f19eb5bd3af264cbd88574e17fe9d3f434ad1f00898e9b3af0e2e9dfc2377280cd77d4fc28c9050c5497e6ae32bf0830abaee28d7c7a6cf978e174d0c2fe6138e97d9f058578c1c4a52eeb6f12b3420ae829ed257c066e8bd8fa0dfaf99bdb01e2ead8fadd421e3b0fac9d3bb10c5cee30a9af937d143e211588d467f673f32779f99c150ec77667413c0931bf339f6e21e95b425828f4e0edc05f580c552049a3e98c358649cd4bfa13d5c72016dfbf188fdf036cae1366074bbb3ae67d096cd960b63f50cfef8490d2639229cb0994ad2b3ac226dc1705798c23eb2fd1bf7d9d3431fcae8fd1f6a80d386ddde165f58a119bc64222676405b9fd69d01670449af337b78901976f638fa7c2c762b5a55decfc7047f87238f406b67751033214da6a549359cb9c74028f3087f728f51545ba42065e606dc928212cc65b224a60c794385e67f83dcc6174b5d61aeedb7125ad2bd6be44453467"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"aerospike/datadog_checks/__init__.py":{"sha256":"cd19a5ddb9a3b462fb28e9ef2e5afbec744a6a439a252fbda3638f8e1839b61d"},"aerospike/datadog_checks/aerospike/__about__.py":{"sha256":"1ed16b008e3ac4d6225df8ea5e0e983d87f6240ecf8bbb929bc823190f3272a1"},"aerospike/datadog_checks/aerospike/__init__.py":{"sha256":"abc501ff519c0f918c105cbcc5eb130af5146d53d589a3e9ddc0fab4b675742e"},"aerospike/datadog_checks/aerospike/aerospike.py":{"sha256":"2baa1b7046f9ce33c8ce89b4dba385a12c5e207ba1d5015c95588e3e4b32a6ee"},"aerospike/datadog_checks/aerospike/check.py":{"sha256":"4776aedade59b4ee69dad9f68e30a474ecadb939f256f80865c25a4a54657657"},"aerospike/datadog_checks/aerospike/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"aerospike/datadog_checks/aerospike/config_models/defaults.py":{"sha256":"a12b6ee5bc203a87803e6eca6508a938427c6e309e6f1f1cbb52ba74dd2f3822"},"aerospike/datadog_checks/aerospike/config_models/instance.py":{"sha256":"62ea4f7b512643ad271e895cd0395d1d4aca172d159dd07d810870b03a3fb125"},"aerospike/datadog_checks/aerospike/config_models/shared.py":{"sha256":"69cda184c99bdf48f89ca90f35a436d69ea2a44ad896a742a1d7169381459829"},"aerospike/datadog_checks/aerospike/config_models/validators.py":{"sha256":"ef421486e47d7264a37540eb6f738c4541767e156c48bc509e1e08bf6172b655"},"aerospike/datadog_checks/aerospike/data/conf.yaml.example":{"sha256":"b167d2472a4a8eedabae52cea8711408ea1b4ffeaf148f0223e09e7517eb0512"},"aerospike/datadog_checks/aerospike/metrics.py":{"sha256":"47a533d2cb3dc4a6e6c5d5f74733003e3e10671bf1e1961dbda5344ce892a252"},"aerospike/pyproject.toml":{"sha256":"f3ff9912d4c9573dd46e43542b0244824926b5ab10cc2319ab1f1dc8a5369f64"},"airflow/datadog_checks/__init__.py":{"sha256":"6db7d3f7eb9237e5b092df82dc2f4e1b8ddca27e7c0530bdf2b9f14c9a8e102e"},"airflow/datadog_checks/airflow/__about__.py":{"sha256":"94538ceae050ee5f94de08cea433967a708249bff1fc164178d8ab9af6543b11"},"airflow/datadog_checks/airflow/__init__.py":{"sha256":"d8c6d8337954ebb155f1397dc289143909b1cc7b69a4d09f0279023c37e9a0cd"},"airflow/
datadog_checks/airflow/airflow.py":{"sha256":"c3a2a519017c9ab459a54173ae778ea22b8a195751516af6a79457e100c3ea8f"},"airflow/datadog_checks/airflow/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"airflow/datadog_checks/airflow/config_models/defaults.py":{"sha256":"2f4e08b99751d02c13e4aa2c596c4d476a726d1e856b46e2f75be36c14dee5c1"},"airflow/datadog_checks/airflow/config_models/instance.py":{"sha256":"f8fe24155831548c85bd8374d18e41a3928e8bae0d6830e903200295d17e26b0"},"airflow/datadog_checks/airflow/config_models/shared.py":{"sha256":"6aeb47e93fb5c0a0ff0e0202bb3bb3cf1ae045eadb5749c34337df9abbf78c84"},"airflow/datadog_checks/airflow/config_models/validators.py":{"sha256":"0424fe17778b76e1b589b9564d0d543d1b71dba1edd6e5d71a7c528dddf68e0b"},"airflow/datadog_checks/airflow/data/conf.yaml.example":{"sha256":"2b2e8cb697d2f704e243939117dc11dba5c62763eb692de72564713cb4243fd4"},"airflow/pyproject.toml":{"sha256":"a15728c6bd8377fcce146aed6494879fa88b14c44e35837816eb86696231f813"},"cisco_aci/datadog_checks/__init__.py":{"sha256":"fe73dc8f45d35205d366455edc6e8c3a8b29870d773b6f103d5261e25f757b25"},"cisco_aci/datadog_checks/cisco_aci/__about__.py":{"sha256":"462c80d69cb82edd7a019811dd2aa582f00c1d4e0d83d05e43f11c1e5ba3aebf"},"cisco_aci/datadog_checks/cisco_aci/__init__.py":{"sha256":"b7f298bcabd603a6d18b1547b21717ca39dd286aa97f823a5f08ca0c6eed7b65"},"cisco_aci/datadog_checks/cisco_aci/aci_metrics.py":{"sha256":"5750051d58575470ee62f0f0f6c8664b668daa3ab9fa05a07366407cc08d5438"},"cisco_aci/datadog_checks/cisco_aci/api.py":{"sha256":"2f0cf491d4fb7738e125ce50e9e07fb6eeb1b1d36b27223afd9fe28353489ea9"},"cisco_aci/datadog_checks/cisco_aci/capacity.py":{"sha256":"29f4c792fc16f8fa3e92e709990e795050a6c767b800a84fe49a2c576d877e37"},"cisco_aci/datadog_checks/cisco_aci/cisco.py":{"sha256":"14f4d512fdf0daac340f7e981d99ccc8827bde1440469339088f89a6f7bd5d3c"},"cisco_aci/datadog_checks/cisco_aci/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"cisco_aci/datadog_checks/cisco_aci/config_models/defaults.py":{"sha256":"91ca9460c5302c365329020d83266171dce9f636d74ee53470f73afd135d2ed4"},"cisco_aci/datadog_checks/cisco_aci/config_models/instance.py":{"sha256":"62b29f4619ad844a5733057afb34456d8cdd0947c51c8dfbe1f991bca06a4789"},"cisco_aci/datadog_checks/cisco_aci/config_models/shared.py":{"sha256":"6aeb47e93fb5c0a0ff0e0202bb3bb3cf1ae045eadb5749c34337df9abbf78c84"},"cisco_aci/datadog_checks/cisco_aci/config_models/validators.py":{"sha256":"0424fe17778b76e1b589b9564d0d543d1b71dba1edd6e5d71a7c528dddf68e0b"},"cisco_aci/datadog_checks/cisco_aci/data/conf.yaml.example":{"sha256":"d46fceda118af84a15f089547541221a52b7c508ddb9fd842500da188b864dd7"},"cisco_aci/datadog_checks/cisco_aci/exceptions.py":{"sha256":"e382eeb9265605558e2f99399647e768bc3fa37d690aeee9b3b32da3d0743237"},"cisco_aci/datadog_checks/cisco_aci/fabric.py":{"sha256":"1f4ca5f756a2f318eef68dabc1f62391958f8323222e0367206da22f94851752"},"cisco_aci/datadog_checks/cisco_aci/helpers.py":{"sha256":"b34774c5af56723015a1fcae43d9146d2593e9f46ae4473a6917b4f2b8b5d436"},"cisco_aci/datadog_checks/cisco_aci/models.py":{"sha256":"b0f365414a4ffdca33dcb36ed09aa589eaa28ab85fda3bc716ffec385bf59df1"},"cisco_aci/datadog_checks/cisco_aci/ndm.py":{"sha256":"ae9bdba9fd8046b2b9651e3968c7b026814ee9c3240926c632ccd8b962188d8d"},"cisco_aci/datadog_checks/cisco_aci/tags.py":{"sha256":"9397a338988ec85746ef7f266e9085e0131316e86bee1ea71beb298a69a873db"},"cisco_aci/datadog_checks/cisco_aci/tenant.py":{"sha
256":"0220ef23f0be1d9a8eecae69e420e97aae4a9ff64012f7a647d8643d098a2872"},"cisco_aci/pyproject.toml":{"sha256":"f3020c48c1d1427cdc77dca17d0b8f004027d55f59ffcf79eedb487a489864e6"},"datadog_checks_base/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"411337fdbc098736fbb892e7d83b9c049f868af8d6ab243555063b4a3f12582f"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"ad24c3a3afb6a7f457e88eb09ee5eb34c82569ab682d1c99153139c1b1d963cb"},"datadog_checks_base/datadog_checks/base/agent.py":{"sha256":"9d7f71fc16188b344a18d61f5eae73ed2d0567dc5c5ffc1ddadbb1a467b7ffc9"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/datadog_checks/base/checks/base.py":{"sha256":"4250c0ed4c676e2376a18e6c299e3385be61d52f6ab9ff41908335a3d01e7645"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"611acba9c969866f4feca52ad89b05757da1ab60393fdd2211f078da95b61400"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"08f897cd26aa17193cd7c36d4c02d1251199ea7a789eeb0a42c7d4bb7212b7dd"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"981219dc63fe452f96075e0fc914bee141179735463179e4bfc93e2c20b8bab7"},"datadog_checks_base/datadog_checks/base/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"37d804c9398bb121d2f89162729347394ab2b3b627ad1affb8f608eb34ab35bb"},"datadog_checks_base/datadog_checks/base/checks/logs/__init__.py":{"sha256":"ee840b22269e35c03869d3b7223c20ab5c65895ae23153a8d273c0f866d5b99d"},"datadog_checks_base/datadog_checks/base/checks/logs/crawler/__init__.py":{"sha256":"ee840b22269e35c03869d3b7223c20ab5c65895ae23153a8d273c0f866d5b99d"},"datadog_checks_base/datadog_checks/base/checks/logs/crawler/base.py":{"sha256":"90ae36a4dc349b0d0fc1ac102f3c21b1d0100b55688ce5e0149e3844da37ac8c"},"datadog_checks_base/datadog_checks/base/checks/logs/crawler/stream.py":{"sha256":"300a7c23ecc48f972f4e9ee9349345977d800ddcea359c4682425fc9dadf847e"},"datadog_checks_base/datadog_checks/base/checks
/network.py":{"sha256":"5228cfd4e5410a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"6dc2cfa5d441b02dd156e87734d800252fd19b8200f57600c5244520a3931aac"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"6e4ac4fdc43aa5e5b5e3b040098f86d2d1f276227743a50a44b333979521e427"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"517cad1b4cded34ad7535dd507404cfd92bf63fa82eb07f103e3a27459bf3326"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/first_scrape_handler.py":{"sha256":"227fad65733389e49d2f6397265200162efc29b415c2e26718fd2268b1fdf7be"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/metrics.py":{"sha256":"b835593871d158c5bc614419058f6308bcde70de43b9e44d8c89b1066d4af369"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"7783dc24cfdbef09075014396ff219a3f556ff4ee3423fe0c1381f0eb929d410"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"aafed8d72bea5a6fb6ebd98653b3aad22b7cc77564594654da6d9ddeaf95259d"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init__.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter.py":{"sha256":"9a6362e041ad2cd605c566b4c88ff7f0d63d681222e35e0cf6945268635b71a2"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py":{"sha256":"a1bd42bc2747afe56b73905295a4f73972f917633a07b3866a15007a4545dc5c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"872b69c3785029d57037ccb991e5ba58672adebe3efb11272431f1c167fa8e52"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"19d05af37f1f8b912dbe0d1a08762132968408616c255db5f62b1a59548ada3c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"d01d5693b79ae07da77ddb0e5fca10122a2804636aca914372304f2a31d5b52e"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sh
a256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256":"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/prometheus/base_check.py":{"sha256":"125025250dbab097776c028c0d7405b36fa92343252cf8b3781e3816e027e924"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"e6f4737a20c8516d7b4ff6fe351fafa33db00cf38f4148bdf60ca4902e9d3157"},"datadog_checks_base/datadog_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"8a1178041d7fdc4a16bc54c2019e1331048e2da5a5bf59d633c000aa5b6105ae"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"c6156b3c399663ee3d12a0fae723adc75ee118d90817593bf9a004c1b24f9628"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"9c3f86014e9390adb4514cc9515eb98f87d81c838fc687b5cfd0e657f6da03f4"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"efbef65c6b56c8f2577b0caf698ca8c15983cbd0f1678da48bd583e5769bb951"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"b11c15b54b9df599a50375cba2a4cb9ab7d3b259ea073f3c4e70223df08ca584"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/checks/windows/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/__init__.py":{"sha256":"c4ced6dabda1b7e2b1fe3d22f03bae7bf94433606ffdbab7be0d04b34009e4a1"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py":{"sha256":"209ca7cc7f6dd944309034f981b91635a6c85ff9d95ccfc92d2844be17e690b5"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py":{"sha256":"124462f2699e89a71bb2ead225be6f014cc523f94091459c9d20bb4ce42c006e"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/constants.py":{"sha256":"fdaa806dab5cbbd790bfd36a6d509a43d7394be1e4922504455e68a82dc39799"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py":{"sha256":"8498fbe90224b5934558588a21110d2224c4d0d6334e5bd52dac213c2566f49e"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transform.py":{"sha256":"6d93f17ed0f0d1dd55157e3dca21486be9da18e62529c320a6fb9e491920133f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/__init__.py":{"sha256":"a8b142ebeee6817e16846d57125966018eac45ef4a9870efba31fbc9c2555e92"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/count.py":{"sha256":"8263467bddb648fe101243270ff9dcf30edba0a616fa65b69f9fbabe975c9a37"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/gauge.py":{"sha256":"73be1f652e85addc433ba64aa2fa75ee1daf85322691a351d8e2deb35af4d681"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers
/monotonic_count.py":{"sha256":"479c167c31bd2e471baab21d49ce9dce3470b40729dabe153ee5456aa3a5ce2d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/rate.py":{"sha256":"3e4c739755cf6cfb68fb942b882a23361e5684c4e3c03710c2a63f8b6310052f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py":{"sha256":"c2f74b0d2b871ca2276f35bcb8cf10f764dc454b90975d70b2fb7475266dac70"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py":{"sha256":"2071f661338679e8b63d53790a1f7df200ea620facd4939bbfd6b44e602f3a75"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py":{"sha256":"85633c087612a859c562b35daf5345638eb89cc01514e88df238658594ce6fbf"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py":{"sha256":"a2ffb8f0936f650e8a3c4671cfa5f42187009776d50059ccd79faf9467eab18d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils_win32pdh_fix.py":{"sha256":"9d64eb01b07d5400ff0da693ad2055d8a0e606f8a9185988d38fb00df9f151af"},"datadog_checks_base/datadog_checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a7549003477af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"65884443d4540373efc97e19ecf2d595154b958eb9f0248472423b9f4bd16498"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"7cf705691df31e94a4328f1ea6f1779517bd2ab1274bc83827475733f9efcecc"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"dcfe53abe916664ab1e40b6f421524f3d3b5b3ee589c4e3d2c9f3179e29cbc1f"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"18855f7b42006ef9682a59bcf3a3cb8cbca3572cfd9e439dab9d686692bd9626"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"646cc5d9d5f2d6e545406746fdbbf3fe930c8942da05ca73adafe4f70a3d7f4e"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"bb87bc6454ade023bd9b1b99e148a7b5e2093b22ca420cdc1c56b4a53cd16826"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"dc73bd86dc7d2d5ea945a1356462ac3c4c5abd36e33c73cae9930cd813d88f76"},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b31507250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"datadog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"4c2d2aee209b36a7188df5396b304da429e2f0b01060e7d8e8500313749910f0"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"d5746c3c959da666f100a2815471813d49b858776f8aad28bee820b69c7b2d6a"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098b
d14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"986d743c51608d608b7ad17a8a10b8eb7550cf55774b849332cfc5f1b00392f6"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"ab88bbece8f9d0a38db114f121b3aa7babf43239debbe4069dd236cfc742d8f9"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"3ad98a77914a8e5c09c116e113868898e0563ceccd3d5a9f5d79875e4e0c308c"},"datadog_checks_base/datadog_checks/base/utils/concurrency/__init__.py":{"sha256":"391b2c1396d766e62e6b95022deb067cfbdcad029c0031da0ec5eb5327c0445d"},"datadog_checks_base/datadog_checks/base/utils/concurrency/limiter.py":{"sha256":"8acd99a541237ec2389c11ad5c6d83d1982fd060f7f3f8b98c940291dccf5bf6"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"2c1a94cc4447c71e537607165504ead40d5668608c8735d3f64e069f47d7dc9f"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__.py":{"sha256":"f964bc85274b6a1cf6a9b115c13d38d419f422ef40f4e96ec3a0b8ff36f17e36"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"b151dd639674771e9433a21667007bd07c62583213344dba4637dfc0b2acf490"},"datadog_checks_base/datadog_checks/base/utils/db/query.py":{"sha256":"4e3c95363c63348076d5b7087ede9ffb96f76a58033ed76db477004b605d52e7"},"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"a0f94966a841cf408601aecc10d3dba4e83e39fb878feddbffeaefec981a344b"},"datadog_checks_base/datadog_checks/base/utils/db/sql_commenter.py":{"sha256":"aff79f2ba4c6450c4cffe423fa774425d102938ec9e270858e4572c6ed579af5"},"datadog_checks_base/datadog_checks/base/utils/db/statement_metrics.py":{"sha256":"4b49073e816a099b514d9a1323a3e2f99ba5e88b9ef18de3d7f92c347fb3128f"},"datadog_checks_base/datadog_checks/base/utils/db/timed_cache.py":{"sha256":"6bd2ace4a294bc4c41e863804d74d208b6a656d2c3f75f439fa1e9c1e18ab45c"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"90d762059568666d6835135621ab6b01f80e4636e444a86cd40816fda4028f72"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"38b5f63aae7c2503c5043e28a94a422e400fee8bd118dfe2d6f164d44d0df0b8"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"f792e6b58eeb65cacf70d3dabaef3b44335392d8dbec6135926ee73af9913fd5"},"datadog_checks_base/datadog_checks/base/utils/diagnose.py":{"sha256":"78b31ed2d212a64cd2df2c5547cdc81f1a2b4097c74ff5e2e82ab8cf344dc552"},"datadog_checks_base/datadog_checks/base/utils/discovery/__init__.py":{"sha256":"7c896046fefe58933a21fa392211462b829c0caac24865ff7eb5e0899f73e8c9"},"datadog_checks_base/datadog_checks/base/utils/discovery/cache.py":{"sha256":"7fd2f703b619a59fa6a59a452308ec6b969bf5c64c1a4a9d7ad76bf44a5a95b2"},"datadog_checks_base/datadog_checks/base/utils/discovery/discovery.py":{"sha256":"9bcc3d55162eefc79dd202af56ee1c84dad1752302aae24edd85359b378e734d"},"datadog_checks_base/datadog_checks/base/utils/discovery/filter.py":{"sha256":"459f13ddb2cfe2e84af0a5f01aa76860c254f4edc402f3924e119
ae702d0311f"},"datadog_checks_base/datadog_checks/base/utils/fips.py":{"sha256":"1bdd91368664acca236932a2d4389c71bbdf3053b769d92d0d980a8e9c64913c"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"8869726f147a68f3c494dc4d6f610b3b36e4df6f23f4e541031ade749c5d091c"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"d1249d0bbd63c1a07ad413837eeb1a84256bd5ce7a1af4fd8b0b7b72275c1cfd"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"31698f56c090124974d97d09e2022d7b50028c1dcb78addcc192f680f0e1217f"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"6114eb3c25f54b912d1cb55c3fff8611fcd1a2db3f2d3732d5ffee1d956cf748"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"3ac91ad1a6d9d0279dacc04672e6ae25aaab4937ccf636903415dc7460ad19e2"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"d3756946a79fa280add151008bd06fa25643de6dfeba14f6af642e276c5ce03e"},"datadog_checks_base/datadog_checks/base/utils/metadata/version.py":{"sha256":"7257bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"c9b504a7dcffac396bdbea089158c4581fa6440bd4c28103a4051c6504c4208c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"dab25d1f87af4729ec0530dc0b04ed788c0c09906f9e674113a736f2c8c3e5a0"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"748ad183c8795ee93e403b08c89285f68b6a45fc34aeeebd1f67c548dcc8b0e8"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"617a3d25081652d167e1d15e7d665aa76d0e32649178b48f3a3833a1d25b75dc"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"c16f1fe972a8e091f1beed6c3740e92c39988d2fdc0aef06a0abdf4e3223e323"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"c674f283d8937acffcde65a57351acf05150d6349163e94b3e8e530801626f2b"},"datadog_checks_base/datadog_checks/base/utils/prometheus/metrics_pb2.py":{"sha256":"d98a7a599cb428afbf3c1f1ada8368b12e9cfa598002788d398b897ab00c4de7"},"datadog_checks_base/datadog_checks/base/utils/replay/__init__.py":{"sha256":"391b2c1396d766e62e6b95022deb067cfbdcad029c0031da0ec5eb5327c0445d"},"datadog_checks_base/datadog_checks/base/utils/replay/constants.py":{"sha256":"7b10c6b0380b23bbb5196bde7a55dd8335894e3c47a18266413ee8ef0d4509e3"},"datadog_checks_base/datadog_checks/base/utils/replay/execute.py":{"sha256":"9046d9b996440a3087c9447969ec52ce4382e7c74e5d0c95fdded3d8a2c4324a"},"datadog_checks_base/datadog_checks/base/utils/replay/redirect.py":{"sha256":"f21e13c07ed95637a211ddaacb1f4a68752755137bfcd1a6535745890c252697"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"4e3e4c04
ea321975c3581dc7afeed4283cb9195d4d6499053d8e13e2ff4f1c78"},"datadog_checks_base/datadog_checks/base/utils/serialization.py":{"sha256":"0eafc28756b17e888cf64b65f6e5a02a6c242aef85a65575a3b8a79388ca596c"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"da651d3d3a097dbcdc816ff5ec2c8bf87997e125b5c31b3fab64313189721120"},"datadog_checks_base/datadog_checks/base/utils/tagging.py":{"sha256":"004504188c498cdbe8388110405922b7c653d8ec91c62ca6d45cc21227080acb"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"70dcbb0adb09cd250c8bb27edd645e65552f37260e1a427022da89318d35aaa0"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"314a196c064a0cc4de5512b44839986088a0c828ce2538b560506813ace9b3c3"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"34fb7987e4174ff5cbf03d8584d94d6c13be2e7c2baffb1d4b80c5a4a455f485"},"datadog_checks_base/datadog_checks/base/utils/tracking.py":{"sha256":"158228baabb7281f89b31831335897e48a6ffdc5a1b3ccd03933784b15ce909e"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"76381faa72acfce6863031501a53b955d2d047150023e5f828424b2e53e95483"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"df061b86e80d0375a5aedffe104a4198949297c9472ae52dad13fe9d5d8a05f4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"747147080f5d03c2742e233e4bd635d4c58abff8cf5c803fe5881f776c1603b4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"a35b1970916e9035ae71d185b6506bbd2b798e26ef1b7b91fc3b30788b19dd49"},"datadog_checks_base/datadog_checks/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"849ca20bfd3b86aa8c8eeb88732b4026fd8a0e74a0a96ab6cbe269270b71a8c3"},"datadog_checks_base/datadog_checks/checks/libs/vmware/basic_metrics.py":{"sha256":"683af414d39b8d77d66fd97146999de4bd7a7be9ab934ed9224867a0b74c099f"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"55310a8bffb4b55af9552268e627c83c284db55c99c47f754fa05e5bb33ccd3b"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"e5b7d11f19ac5286897259c2ac1e8209ee7bee94df199d5155e1b174cce6afbc"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"cdb77741ebeaa7b6a13616873b5725009b7566a77ceef013fe5dd1c76cbdd81b"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"96b910b1c359a0b37a0a8753b6e1e50c803ae35bf4f1e7a31418678cf16792c0"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"a9ccd2133baa1058f744e494e2bb43cb5221fbbb41b6c3b8a0553ead7f6a851f"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"b5b9cf9d77c8dda6d752a4d37d303405fa1935ac071b17a4e8b055160b7d2a28"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"a12ac852908b6eb9f3b594e893c41a38951a9a1e030b6286c20
e76aec9965c8b"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"882984f28171bfde4a2c34c25f609b5a758917f0c9b9e00b2ba9a0d489ba1e19"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"aa652ec3963f90c1e7c552d35243d3863f737fa9de5e45131ca55758c5e00de5"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"434576db21c019a7366f5350b8e305c0790509aadd9cbd980f6c3ac22b87874f"},"datadog_checks_base/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py":{"sha256":"4441d475ac7181f8e7edf1037da1310d776c647883d0362a5acfb88e44e4d45e"},"datadog_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"864f9f437a351bb82a2962e15a1198cc6271f9cc25c3c73522402954fa42ef9e"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"sha256":"269e7c39a6562687aa6ad022b1b88c85b5f6be403c11a8412775b3c55715196e"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"a1895061a3be05415b331c1b17d2c02b1f912f36ed238948c46d7af68025d7ed"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"51008a1bf148ec1c7bc0bb8f6ad3be19ba950e6c13b5cb84a2fd2684fb66327e"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"01ac5dc9d1518c74c4b621a1da0b901da5ffc954abcf81972f8e478abdb098a7"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"bbfeea3683c40059f5e1ff52fc459e82200727041a187328d65f58b92b445cdd"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"907ea75f4055e675b732171687ce6148d6484d067b36726b942b4e9c5e1d747b"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"3eb01719d94b9e857654c41f7ff9e849a489274c20e010221a7c3b8c240b4a29"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"785c269eb5fe6dab4881bbbe889cac7aa00f34eff8ae4d54b9a28af3d7af2e18"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"a2bbce80c286344b2a4932da942010a563fea2ea2ba46d40ec69fe8f69910fcd"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"331c49e4d1a353ded64777893a96906746f386f03cf32b30135aa260665377ec"},"datadog_checks_base/datadog_checks/stubs/_util.py":{"sha256":"9989e9f7fbeca50c1359c06188f60c309dc421c40f4a9a407147d18c42c180b5"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"ea230225c9c41fbb1885a38741cccf1809034d9b1fd696eb6efde1de6bfba762"},"datadog_checks_base/datadog_checks/stubs/datadog_agent.py":{"sha256":"b167bb8f650441e0f3762aa3e0ffe73e1921157f34ff90abe47723db963ec6bf"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py":{"sha256":"470a0220a572f2f6df6b61353fb63b82447d569274d15b6e3f5f0c12b5e7f6b9"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"ca56286c527f7d5f940b0baf5a66a4eba6ad2df036759f6000b0985835c63a29"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"8f92abe525d4947ba4dd41870d0367f4cda884df95e57d9fce62718f0e8fe8da"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"1aad3f848119c3aaa92394962ffbb331a22991bcef173b242347a54cffef22c1"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"76ca8472c0bf63b866e28aea3bc449e4de63905a41ee9c8662f09e5eb6fef5
71"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"f16c179ccf60d6c7d2e76ebb4180895ff85df0c665e6446006043f4b374d757b"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e2f5ac4c62e0ba31f0758668d2d940424a833c7a1fa9424cde4f5e22d26a1114"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"c60dd47541dee138de478b35dcb50a6765355954ade94eb38ec5511ba957e9d7"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"723edeaf00a1ee813aa4a6467fc34c3ae419f354c6b6172de678e39b864a8bd7"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"80dea34445564d9eee264533455310e69c72b4d640e369ea8c97a365717a64c2"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"6c4aa2725fac2e0cb0f660f545d5a8b3c1954a50a49f1e58c86ddf5cc068c137"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"42e848b0a6686bf335dfe8040b07acbb9219c12317cfdf564f0a9a974262c913"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"1d06d06a428cf8bc3be4c0e422d6982588a94ba60edb867fca60d5c43b31e435"},"datadog_checks_base/pyproject.toml":{"sha256":"6e047c97ce6b560917a5880e80629158f2be4b1eb5766fd63678d224c0d28c10"},"datadog_cluster_agent/datadog_checks/__init__.py":{"sha256":"9bf116cc5e14139f58802ece354b270c165226e0cd229e867e2cb291b6855643"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__about__.py":{"sha256":"d71d1b9532001b731090b6343e8b96809fc97e43fc3c57a8d7e37dd558b07f1c"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__init__.py":{"sha256":"f2111f23fdd51bfe1675ee777f927a9abf036dc4035c81e048d315925b23f6f9"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/check.py":{"sha256":"edd72c798391d5f3ddec023482fc5f6dec4b82ae4a1173f12535fd79cf2e1914"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/config_models/defaults.py":{"sha256":"16cc866302710ce999a93d7637a6465bcfe95dc43202410fa57da2308ef513db"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/config_models/instance.py":{"sha256":"4dee84a87aa8134723997e798ef49557b10e78a2b772d2bbdb40023ca843b6b5"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/config_models/shared.py":{"sha256":"6aeb47e93fb5c0a0ff0e0202bb3bb3cf1ae045eadb5749c34337df9abbf78c84"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/config_models/validators.py":{"sha256":"0424fe17778b76e1b589b9564d0d543d1b71dba1edd6e5d71a7c528dddf68e0b"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/data/auto_conf.yaml":{"sha256":"c1cdfd21cccbc2d08fe4681dbc1c7a47dd9715ab7336a562414d63cc65384a20"},"datadog_cluster_agent/datadog_checks/datadog_cluster_agent/data/conf.yaml.example":{"sha256":"0ea54ebe3c490adc8f79d2c9431256dd4e9d548996573a2398d68444ea2aa216"},"datadog_cluster_agent/pyproject.toml":{"sha256":"61d4b5fedd2508c73f52dd46907572c04422503616a1e6a3589b5ee32eae7b43"},"dns_check/datadog_checks/__init__.py":{"sha256":"fe73dc8f45d35205d366455edc6e8c3a8b29870d773b6f103d5261e25f757b25"},"dns_check/datadog_checks/dns_check/__about__.py":{"sha256":"404703027133730352ec2831dfacac351d58b9669901ef32b2e0fcb47adafd27"},"dns_check/datadog_checks/dns_check/__init__.py":{"sha256":"3e65673287e6731fe4cc5fbfb42d2841c6146fa8ac53971a0b313753c72bf72b"},"dns_check/datadog_checks/dns_check/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f
2fe490a23c17cf71ed294dc"},"dns_check/datadog_checks/dns_check/config_models/defaults.py":{"sha256":"6a6824907cf9f122e5793ce997e1f2c3386861174ff8befa9dc7c54733ed6a7a"},"dns_check/datadog_checks/dns_check/config_models/instance.py":{"sha256":"5921a719c0dda75076c54312d57ce794f6dc0eb3f6bd40b5c0209254e5c93475"},"dns_check/datadog_checks/dns_check/config_models/shared.py":{"sha256":"7d42e3c6fcdbfdec55c9d6616937af3a55e971b1320222b8df42ccb3882c353a"},"dns_check/datadog_checks/dns_check/config_models/validators.py":{"sha256":"0424fe17778b76e1b589b9564d0d543d1b71dba1edd6e5d71a7c528dddf68e0b"},"dns_check/datadog_checks/dns_check/data/conf.yaml.example":{"sha256":"64abb8c6e9cbe755c60faba4820978d838b2873fa450f8106ea55b58156aabe9"},"dns_check/datadog_checks/dns_check/dns_check.py":{"sha256":"3da3a0698a1d8f18ec0ac4e4e2b53ef24ff86ae3f6aecf17e7b23d60a0fe3241"},"dns_check/pyproject.toml":{"sha256":"0491bde5d54d79bcb1dafc25ffe56ab1ea2c17faa50f91151a9f513753cd6e52"},"mongo/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"mongo/datadog_checks/mongo/__about__.py":{"sha256":"11d3640c8e045832468b7bfd78aeaf58446723d5c5d96c2cc9bcfb75d3003ab8"},"mongo/datadog_checks/mongo/__init__.py":{"sha256":"61d075d33fa89e5885372d5f3a785337466b5eeebed21233575516434b7b912d"},"mongo/datadog_checks/mongo/api.py":{"sha256":"23a10ac274c02b9dd1a88f150a3a70d04f69382230c42221152c3bcbec537a8d"},"mongo/datadog_checks/mongo/collectors/__init__.py":{"sha256":"ea7880c2ee86eb11efffb297c6a78559a3fd3d02522007f4ff30928d083e00bc"},"mongo/datadog_checks/mongo/collectors/base.py":{"sha256":"059a13b29ff09de742e28c8de388bf4707ac4c6ad3c9abf5b477d84a24d9e29e"},"mongo/datadog_checks/mongo/collectors/coll_stats.py":{"sha256":"e32b15f03a63ad4e7640698d0ff2d7a1d778a5d1596f199257775b4acf224975"},"mongo/datadog_checks/mongo/collectors/conn_pool_stats.py":{"sha256":"37da60e010e13f58c48f7e159d8a2e357479f9e9832aef1c65babe742118d0a2"},"mongo/datadog_checks/mongo/collectors/custom_queries.py":{"sha256":"f631516ce001db4a75caecf1f43c968b427abc75e8a6db77a0fabd1e8d3601d8"},"mongo/datadog_checks/mongo/collectors/db_stat.py":{"sha256":"59fdc96716b1222ef74338855cbd7089b9c8a50ffe545cfa26514cee84419f9d"},"mongo/datadog_checks/mongo/collectors/fsynclock.py":{"sha256":"7dc6cc7527c0d75f11eadb95095ea019af39541af5a0596211a1f9ab25d2e5ee"},"mongo/datadog_checks/mongo/collectors/host_info.py":{"sha256":"132649b269a6aa522ba4971f3275e75e32c7e704383897b3313f512648f58354"},"mongo/datadog_checks/mongo/collectors/index_stats.py":{"sha256":"c19aaffe1a48bf14c0b3304324825f37b77db277ded422364b0c5072322bb532"},"mongo/datadog_checks/mongo/collectors/jumbo_stats.py":{"sha256":"52c8b97a4a9eb38b3248b5154ce3db3b5b0fde7ec11988457d4c82ecd319d510"},"mongo/datadog_checks/mongo/collectors/process_stats.py":{"sha256":"243d568a4211b0570c53999749fc78114501daaf88930a3a20d0dc313be15673"},"mongo/datadog_checks/mongo/collectors/replica.py":{"sha256":"540d191f7a1e32417b613b0854c43c6f39759e97f39b2c627b7f9fc244a60909"},"mongo/datadog_checks/mongo/collectors/replication_info.py":{"sha256":"1816d410df08ee4cb35f20a5bd95e0140acdda90db45853f9f9a138b95129563"},"mongo/datadog_checks/mongo/collectors/server_status.py":{"sha256":"553b3ffe83ed909c30dadd39efe56d807be12b86815916bf8d3cec32618fe193"},"mongo/datadog_checks/mongo/collectors/session_stats.py":{"sha256":"f0fadd88121a0c01621fef8f5b2b39063562e40fd8fef6fcfc68902a10cbdba8"},"mongo/datadog_checks/mongo/collectors/sharded_data_distribution_stats.py":{"sha256":"cad14ad478913b0b3bf96ca59b92dc9aa5a27824b01b9da0be3b
b8b1dc679af2"},"mongo/datadog_checks/mongo/collectors/top.py":{"sha256":"2ae019703fb9d854400178bab84bb8c72c8b05b60b25ae87b0856f44e7fd5b09"},"mongo/datadog_checks/mongo/common.py":{"sha256":"01b55dea3aa97b30a943186a05056e8fec6fc561cf000178ccf85c1d6a27eb6e"},"mongo/datadog_checks/mongo/config.py":{"sha256":"d7fd197a4c4d77b81a2a0d6af89be997a6716046e21573978c256d771d509052"},"mongo/datadog_checks/mongo/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"mongo/datadog_checks/mongo/config_models/defaults.py":{"sha256":"ce4cdfb2d9a7aa264afc61e0f2f16b6d1f3156a6665ad5cf874b7d8fed993323"},"mongo/datadog_checks/mongo/config_models/deprecations.py":{"sha256":"8f7a2da4ae21110d6a35a1c19c533c5bba8fafcd3db333e17552cf2c9ef29c35"},"mongo/datadog_checks/mongo/config_models/instance.py":{"sha256":"5b11c478a242324b632af646f3b1b788a13609d18c07e21dafab883ea62bd7ce"},"mongo/datadog_checks/mongo/config_models/shared.py":{"sha256":"59e3e9392c6d66622003fa9382bcc1975c31df77f379ac370c154619be29dc30"},"mongo/datadog_checks/mongo/config_models/validators.py":{"sha256":"fd66b0faa82cb277479ee3da5492f6b67a6ce44bafb1d6c3cc319e1e473a9e3d"},"mongo/datadog_checks/mongo/data/conf.yaml.example":{"sha256":"7d45703d296a891d9099bf25eea9dbdf667f5252ad0d2e2675dad0ee4f8e3069"},"mongo/datadog_checks/mongo/dbm/__init__.py":{"sha256":"ee840b22269e35c03869d3b7223c20ab5c65895ae23153a8d273c0f866d5b99d"},"mongo/datadog_checks/mongo/dbm/operation_samples.py":{"sha256":"e34ddaebddbd13d8b2a84ced4e59cac7cbc908fa1d29fe070d607930d2bde83c"},"mongo/datadog_checks/mongo/dbm/schemas.py":{"sha256":"710591a768d7683b79ef3de664c42c6cca6d629efb9d38573f980076380ac3e9"},"mongo/datadog_checks/mongo/dbm/slow_operations.py":{"sha256":"61294e225e809aef697bc24c25f562938846b68f0b2c65fecda810e9f3d4af94"},"mongo/datadog_checks/mongo/dbm/types.py":{"sha256":"4ba6e0c051ec3db44ea79864cb4bfe2596692958630f206a79e4eef87ebbb4e8"},"mongo/datadog_checks/mongo/dbm/utils.py":{"sha256":"d8ecc82bd7df0949eafa404e929b11422056cf77649d27a6ad4f323f9eb3d911"},"mongo/datadog_checks/mongo/discovery.py":{"sha256":"e500bf87b5539e5c9cb9fe49b3b7c62f6c7a8beab24d22422105c8147b908dea"},"mongo/datadog_checks/mongo/metrics.py":{"sha256":"b926adc0b9ba32a00878d367a780a56eec46bc4b8bfebb871118eb6278a383b9"},"mongo/datadog_checks/mongo/mongo.py":{"sha256":"ddcf2feb2a72cb8bc2a4d07058174775e0e4dc1dbbceb0855321c3e3ff5e1864"},"mongo/datadog_checks/mongo/utils.py":{"sha256":"4785466e3e5c3b72a51ac715fd789e63e653c4c9982316ddba15a73d5cb5546e"},"mongo/pyproject.toml":{"sha256":"5a0d6393f2fbdfcf1ba60f6663f7a52188b0384792cc519f37a051950a73de3f"},"mysql/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"mysql/datadog_checks/mysql/__about__.py":{"sha256":"ecc7528c2b9587342efc10e3b5348c7f4b4e6153ba70889a98ee6a21465e3dd6"},"mysql/datadog_checks/mysql/__init__.py":{"sha256":"4c8b91ad58403cc613a89e3e3aff174eabc45c5c3341eed2097b92140e14c5b9"},"mysql/datadog_checks/mysql/activity.py":{"sha256":"a238af06051ff07cd5def717a1584a2debf43ddcb888c2284ccf1dffe2774e46"},"mysql/datadog_checks/mysql/collection_utils.py":{"sha256":"b4e9269603f28c08cd51169d1da215c3d7fe43d2c374aecf20319c0397efeb8f"},"mysql/datadog_checks/mysql/config.py":{"sha256":"bdc43bfb692986d3c44eb4aa469f76e801641baae8f0297a03667a4110319265"},"mysql/datadog_checks/mysql/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"mysql/datadog_checks/mysql/config_models/defaults.py":{"sha256":"f8561dcad63517097
02c1f0ffea640a021a48c6195793d19e99451513f6c1f14"},"mysql/datadog_checks/mysql/config_models/instance.py":{"sha256":"8c55e5a8a426e91b83167c5c0b030f489f8071bed45eb0505e0d653583befb3e"},"mysql/datadog_checks/mysql/config_models/shared.py":{"sha256":"0e28bdef38bf42f742007bc48081a27b7229228f8bc0888b8a54b0e833e3c30b"},"mysql/datadog_checks/mysql/config_models/validators.py":{"sha256":"9beccee4f007680175106485383200d3b84d198897f0af516b91097f5794586e"},"mysql/datadog_checks/mysql/const.py":{"sha256":"bbe1a29f16982f187f769af148f09c8b72c347d24c941fd94c6a6031b81d491e"},"mysql/datadog_checks/mysql/cursor.py":{"sha256":"36579b19d3407dfb04c2bb2227f73ccb2713cc802dd673e0d8243554bafb42e4"},"mysql/datadog_checks/mysql/data/conf.yaml.example":{"sha256":"d4afe84e0c57f4c1b4ca6a4905a1f1046f801d1731d1374bf6e931241ad778d4"},"mysql/datadog_checks/mysql/databases_data.py":{"sha256":"7ab742fd54bbb41c43d4beb69971744029c6a52c0ffb5eecead0f2ab1933ea7e"},"mysql/datadog_checks/mysql/innodb_metrics.py":{"sha256":"bf0b19a48091440e35def5d551bf7442c86ebd32eae335ca11b5a2da9163c6a3"},"mysql/datadog_checks/mysql/metadata.py":{"sha256":"5f2b60da3b1d5c06f5fd9fc9dce251b11717f84a0663a516c342b32c9875e658"},"mysql/datadog_checks/mysql/mysql.py":{"sha256":"fd20e00e899723de78868754bffce41272249d4814002a0a27d18a1e6f41b4ff"},"mysql/datadog_checks/mysql/queries.py":{"sha256":"f5693bd5f6b6cb93c37573648dbc8cbf106037ebc07e61e4b49986379ed7e576"},"mysql/datadog_checks/mysql/statement_samples.py":{"sha256":"a58ca2e1833fd515523ea2705bbbe9dca1808f079a71df55573ef4e3f1a764df"},"mysql/datadog_checks/mysql/statements.py":{"sha256":"f75349869f259ec2af90e17a370cba6b76335399d66d5a722e0808cb9df6f3d8"},"mysql/datadog_checks/mysql/util.py":{"sha256":"ed6c9b2ce3eb327f1660e485c4bff2ef9788f51c9244fdc2e105a9ff2a0e30bf"},"mysql/datadog_checks/mysql/version_utils.py":{"sha256":"393097b6536264021b4421ed8adccbfece01202671cd371409b7a7656b8570c4"},"mysql/pyproject.toml":{"sha256":"c35c1d363584a493e98c1ae7bb511a76bdfdcc54660ec65e440018a3a4ddbafd"},"postgres/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"postgres/datadog_checks/postgres/__about__.py":{"sha256":"aca1e95cc9f996863822795043aea06c45560afd443827d61bbcf691891e6148"},"postgres/datadog_checks/postgres/__init__.py":{"sha256":"beb65398de80eb50d7350ecb3c69d146335fa642e8ec842108a618be88e59bf2"},"postgres/datadog_checks/postgres/aws.py":{"sha256":"86c712de2d4b99ca7a8e50c6d57552a38b03ee4858be042806c112ea6fe5f26c"},"postgres/datadog_checks/postgres/azure.py":{"sha256":"f22dd7ae2a0b4ddbb35af1147fcd17a007c481ca0c5fe73bec1439d2cfbf6100"},"postgres/datadog_checks/postgres/config.py":{"sha256":"e159a17503bc0d05ebf3a0aca8c006d8fe7ae1956d67051400c08edd13af9bca"},"postgres/datadog_checks/postgres/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"postgres/datadog_checks/postgres/config_models/defaults.py":{"sha256":"05110354debc8190d5c87e83e66da3e76beb379609977de848d23c5d4a8fd091"},"postgres/datadog_checks/postgres/config_models/instance.py":{"sha256":"570f94e2ea5703c5bf9bb3ca1cd9a9fbdb7489c79bb0a6591f416f26a958d5be"},"postgres/datadog_checks/postgres/config_models/shared.py":{"sha256":"0e28bdef38bf42f742007bc48081a27b7229228f8bc0888b8a54b0e833e3c30b"},"postgres/datadog_checks/postgres/config_models/validators.py":{"sha256":"6fa54c66701f75695f5c0a7970c720e6a737d5e56fa2a0220010dee5e93496ea"},"postgres/datadog_checks/postgres/connections.py":{"sha256":"756507dc5245aae28db86ce13d17f358741e166422504a682fc8767c4ebb507
6"},"postgres/datadog_checks/postgres/cursor.py":{"sha256":"21b46f57d89839c724f3cbcc9e198adced62b8ab84299d39229130ae09231422"},"postgres/datadog_checks/postgres/data/conf.yaml.example":{"sha256":"52ea65886c958ed86aa26e0edc36f1c55e65d17131330d282d9986fce88104e0"},"postgres/datadog_checks/postgres/discovery.py":{"sha256":"e08d4c113e824c490ca92178bb559a3f7091499914b032e473014f20d9a06610"},"postgres/datadog_checks/postgres/explain_parameterized_queries.py":{"sha256":"130a841398a1bffe07c355fd9f317ba0d18ad68d2911cb924302549ad922234e"},"postgres/datadog_checks/postgres/metadata.py":{"sha256":"7dbfbddd09b881f51a58a89760705e0d4be637e5f63397676f9613a8e2340049"},"postgres/datadog_checks/postgres/metrics_cache.py":{"sha256":"ab8327cc641e2f05cce926e9a0acaab46529856168f8f87e933af4878a8c326f"},"postgres/datadog_checks/postgres/postgres.py":{"sha256":"156e63a332f13f453991ac35858aac6951902b6f104e4b608da61fe33a2694c7"},"postgres/datadog_checks/postgres/query_calls_cache.py":{"sha256":"f8058ffbdd750c9cbe49fb45ced0590e0f0418c5635f99cfb4240920f3779f7b"},"postgres/datadog_checks/postgres/relationsmanager.py":{"sha256":"7fe204f4070c97d825430cc15ef70174f3a35c60b582c104a85bfc477044208b"},"postgres/datadog_checks/postgres/statement_samples.py":{"sha256":"382d5337f5f748bb18e56c8357e667a59abc4d2cb692372c4903f29dae77e3f5"},"postgres/datadog_checks/postgres/statements.py":{"sha256":"bf846b252e6cc0c76a1867afa60942e3a23271c32d30a737fdaf5a1ddd70d665"},"postgres/datadog_checks/postgres/util.py":{"sha256":"779b0b7c95f9488ed51b3adfe30f0f62c887ef60b8fb386f1685607b8868a767"},"postgres/datadog_checks/postgres/version_utils.py":{"sha256":"a61999af782afe9d0aadf7ff49817e093de4739319291330df66157641102e0e"},"postgres/pyproject.toml":{"sha256":"984e3c4cf2e27443307f5477f422910f78228ed6667d8f07ce54f2547d33c0f4"},"sqlserver/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"sqlserver/datadog_checks/sqlserver/__about__.py":{"sha256":"cf584d1aa1c901459a1a55e64097339a34b3533fb9ad33b5964f002b14445997"},"sqlserver/datadog_checks/sqlserver/__init__.py":{"sha256":"5cb0fa9aa3dfe21fcdc6404b1fd475678300408f00989133bb0b761aa4fac391"},"sqlserver/datadog_checks/sqlserver/activity.py":{"sha256":"bfb2efde67e6f36145127fc93108f544adf5df0679228fb910e38a39c408d0be"},"sqlserver/datadog_checks/sqlserver/agent_history.py":{"sha256":"6ca6ec85ffab1742a368489067472a5c988ce7764a4484310bc830d2139148b2"},"sqlserver/datadog_checks/sqlserver/azure.py":{"sha256":"46bbf99a3c119c19784a2c111d494fc9d5ca97aa8ad4077b33c341fa9a1f7dd3"},"sqlserver/datadog_checks/sqlserver/config.py":{"sha256":"664da14044876bec779597d7a6237db956a79e0ea2685dfb1b220b939931b1f3"},"sqlserver/datadog_checks/sqlserver/config_models/__init__.py":{"sha256":"c9cf5c66894430e7edbb00d00613b58ccfd38360f2fe490a23c17cf71ed294dc"},"sqlserver/datadog_checks/sqlserver/config_models/defaults.py":{"sha256":"fd287bf46bf10029d9e770e8f35eb90ee5bf8c280a6a2ac307568c40e8f0f5ba"},"sqlserver/datadog_checks/sqlserver/config_models/instance.py":{"sha256":"37f964640a7d03cd4492b7f4a8de939fef9ed3c5b6547e708569daf99824fd5b"},"sqlserver/datadog_checks/sqlserver/config_models/shared.py":{"sha256":"fd4667b5473e8617aa585a0d1260b7d84617c9a940743c52fe167f5fa5b2aad5"},"sqlserver/datadog_checks/sqlserver/config_models/validators.py":{"sha256":"0424fe17778b76e1b589b9564d0d543d1b71dba1edd6e5d71a7c528dddf68e0b"},"sqlserver/datadog_checks/sqlserver/connection.py":{"sha256":"7ca08f4e8a1a3050a81d0fe41dc420db0d0aca499bff0db186edd5a781a63b72"},"sqlserver/datadog_checks/sqlserver/connect
ion_errors.py":{"sha256":"46e49f8c93f4d4c9ecb9db085e0025a1cebb753de98bd22116229710d5690922"},"sqlserver/datadog_checks/sqlserver/const.py":{"sha256":"9acddb9fc5bb8d0e64810e3098560820feefc6337a7bd319898ae04ca08ccaf7"},"sqlserver/datadog_checks/sqlserver/cursor.py":{"sha256":"c4ba12972d28f5e9eef59ab31216961022e4b26f47024bc17ecb814c29a77158"},"sqlserver/datadog_checks/sqlserver/data/conf.yaml.example":{"sha256":"bc306cb0b6ade2e4e2dd68cc2e7e26e7e44a664b410a661636e3ee594294426c"},"sqlserver/datadog_checks/sqlserver/data/driver_config/odbcinst.ini":{"sha256":"55b383a55036ff3af5def0be4444e52ddb163e64972c926a44b082d1e5e62397"},"sqlserver/datadog_checks/sqlserver/database_metrics/__init__.py":{"sha256":"47a8b6420859c2dccc6bd1e822ed8c09474e5d4522d118660e0868cb167e900d"},"sqlserver/datadog_checks/sqlserver/database_metrics/ao_metrics.py":{"sha256":"70f7e64a38011bc901a27e094bc79614714d2149ad8e8ec06ae018568652746d"},"sqlserver/datadog_checks/sqlserver/database_metrics/availability_groups_metrics.py":{"sha256":"73f80a5407684bab7e32a144cd34b1a7975e4689f2ec9db33c8cd2c36f96a3ac"},"sqlserver/datadog_checks/sqlserver/database_metrics/availability_replicas_metrics.py":{"sha256":"9998162cc35398b873faf6e1ee80c2c2bb27d21d9ad62938d8edf223548e0401"},"sqlserver/datadog_checks/sqlserver/database_metrics/base.py":{"sha256":"9005c36c97d05cb7fec3365f5412c5d13447ea4bbc0acca691c3939d6f62c699"},"sqlserver/datadog_checks/sqlserver/database_metrics/database_agent_metrics.py":{"sha256":"2a0d727da73dafef0038ac3047dc61316daf9778f4f2d72f4a59be85f0d727ce"},"sqlserver/datadog_checks/sqlserver/database_metrics/database_backup_metrics.py":{"sha256":"d14117812bd9dd58569bca26a6d8d68bdcccbb27d77096b53f62e0c12db219ab"},"sqlserver/datadog_checks/sqlserver/database_metrics/database_files_metrics.py":{"sha256":"7c79b2a1ebc425c8f87b7aaf2ffa3022a5581fd1d0c4a45adfe941e2aec635cc"},"sqlserver/datadog_checks/sqlserver/database_metrics/database_replication_stats_metrics.py":{"sha256":"4938194ee34f453d76498789ec1cddd1090e7670c8d2b59ed4328e53b937c13a"},"sqlserver/datadog_checks/sqlserver/database_metrics/database_stats_metrics.py":{"sha256":"f874a4497a2e0f82c040c480a557c432b2c7e38663f0eccc3f3f373e0365fdd5"},"sqlserver/datadog_checks/sqlserver/database_metrics/db_fragmentation_metrics.py":{"sha256":"9f003fabc18bacddd0c242be6711591fdbb7c2919bf6ea6e1a560752f0f4491f"},"sqlserver/datadog_checks/sqlserver/database_metrics/fci_metrics.py":{"sha256":"ee766adfea48b3c4b71d3aeeee445fedd64461ad1efdeac4dab2b38e33a49c84"},"sqlserver/datadog_checks/sqlserver/database_metrics/file_stats_metrics.py":{"sha256":"90063d87c2421196740c9eccc5d0b1e3c2df943c7510361ea50aa9e9a8b4d15f"},"sqlserver/datadog_checks/sqlserver/database_metrics/index_usage_metrics.py":{"sha256":"753b281beed7108b119cd1030eb2794d8e60d4e793556b3fc468a1920000243c"},"sqlserver/datadog_checks/sqlserver/database_metrics/master_files_metrics.py":{"sha256":"4f2a4deecace42b1cc485977e2744069dc917f6918f6664c3899d08ad7d6023d"},"sqlserver/datadog_checks/sqlserver/database_metrics/os_schedulers_metrics.py":{"sha256":"99d632ae2570d9fcb52df02f47540b1d8eca75bd37e237d80474c5e2d64cc1f3"},"sqlserver/datadog_checks/sqlserver/database_metrics/os_tasks_metrics.py":{"sha256":"755b0147be8d77913d9f719c277bf68f0e684178387dff781e36f3017623aea0"},"sqlserver/datadog_checks/sqlserver/database_metrics/primary_log_shipping_metrics.py":{"sha256":"dddff1141070f0787aa104dc7dc9153891cb4264391beee20362b09394f9e3d5"},"sqlserver/datadog_checks/sqlserver/database_metrics/secondary_log_shipping_metrics.py":{"sha256":"1a06aa3d6b87f5ec801af33
c30e61f48deb9a431b07d2d9b43636ccf2dfce666"},"sqlserver/datadog_checks/sqlserver/database_metrics/server_state_metrics.py":{"sha256":"3592b9bdad0e509bfaa0fa760495222cfcd642568c1c28a13d667c57a0e42ed0"},"sqlserver/datadog_checks/sqlserver/database_metrics/tempdb_file_space_usage_metrics.py":{"sha256":"ec61f3721c6cb89be8a3973416afc809bba9bf7440e0b992fde0f5b206afd4b4"},"sqlserver/datadog_checks/sqlserver/database_metrics/xe_session_metrics.py":{"sha256":"0bb577e85673592cae4cf546b56678697a666c3d2b27a1e333e3df7b5af7ead1"},"sqlserver/datadog_checks/sqlserver/deadlocks.py":{"sha256":"ce0581aafd1f6428c9e4492bd10ce3b10a20cd7f6a7eb87dfc68ebc67cf6f89d"},"sqlserver/datadog_checks/sqlserver/metadata.py":{"sha256":"668afc31e1235fe5048bccfc701532335405fe85452eec575572e650b3ed1f3b"},"sqlserver/datadog_checks/sqlserver/metrics.py":{"sha256":"803016ab8e3ed568e776331b4dd6d9c0fca41e3faa59f2393818fba02170429e"},"sqlserver/datadog_checks/sqlserver/queries.py":{"sha256":"83f90e5164c552b951b86b37b9fcc85c1b462c8fb2df87f3b07fcbb265fed59b"},"sqlserver/datadog_checks/sqlserver/schemas.py":{"sha256":"9b2788fa2ffc6b72831a8f46a4081adefee574eb44ceb55b5e419842bc5153dc"},"sqlserver/datadog_checks/sqlserver/sqlserver.py":{"sha256":"fde60134ea9877ea8de26b2b8b002e1da2175bf4accef857912eaad8eea2f21b"},"sqlserver/datadog_checks/sqlserver/statements.py":{"sha256":"52a22233c8660d51e017be36fb14916abc401244d177bbe01894f90fd980cf17"},"sqlserver/datadog_checks/sqlserver/stored_procedures.py":{"sha256":"52d1330ecc60f6324330cd05bcaf926c42062add9b728adccb818902d5eb8071"},"sqlserver/datadog_checks/sqlserver/utils.py":{"sha256":"bcca615b23b00cc54b5d2bc46a4f7148fd49bafdf7a98ef08dcb9e0e93ca2e3b"},"sqlserver/pyproject.toml":{"sha256":"73e00df6e84ff54c6df36e150ae5336b80654ef8d4cea3dbe5c09eeb6788e23d"}}}} \ No newline at end of file diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 8af1cd58fd1d1..747ef923c3f87 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -22,6 +22,7 @@ cryptography,PyPI,BSD-3-Clause,Copyright (c) Individual contributors. cryptography,PyPI,PSF,Copyright (c) Individual contributors. ddtrace,PyPI,BSD-3-Clause,"Copyright 2016 Datadog, Inc." dnspython,PyPI,ISC,Copyright (C) Dnspython Contributors +duckdb,PyPI,MIT,Copyright (c) Hannes Muehleisen flup,Vendor,BSD-3-Clause,Copyright (c) 2005 Allan Saddi. All Rights Reserved. flup-py3,Vendor,BSD-3-Clause,"Copyright (c) 2005, 2006 Allan Saddi All rights reserved." 
foundationdb,PyPI,Apache-2.0,Copyright 2017 FoundationDB diff --git a/README.md b/README.md index fd081156a41a1..734d8ba46033f 100644 --- a/README.md +++ b/README.md @@ -54,4 +54,4 @@ For more information on integrations, please reference our [documentation][11] a [28]: https://img.shields.io/badge/typing-Mypy-blue.svg [29]: https://github.com/python/mypy [30]: https://img.shields.io/badge/license-BSD--3--Clause-9400d3.svg -[31]: https://spdx.org/licenses/BSD-3-Clause.html +[31]: https://spdx.org/licenses/BSD-3-Clause.html \ No newline at end of file diff --git a/aerospike/CHANGELOG.md b/aerospike/CHANGELOG.md index ceb3be456ee3c..baa0c5576c721 100644 --- a/aerospike/CHANGELOG.md +++ b/aerospike/CHANGELOG.md @@ -2,6 +2,12 @@ +## 4.0.1 / 2024-12-26 + +***Fixed***: + +* Don't skip last index in each namespace ([#18996](https://github.com/DataDog/integrations-core/pull/18996)) + ## 4.0.0 / 2024-10-04 / Agent 7.59.0 ***Removed***: diff --git a/aerospike/changelog.d/18996.fixed b/aerospike/changelog.d/18996.fixed deleted file mode 100644 index f46ba036b3612..0000000000000 --- a/aerospike/changelog.d/18996.fixed +++ /dev/null @@ -1 +0,0 @@ -Don't skip last index in each namespace diff --git a/aerospike/datadog_checks/aerospike/__about__.py b/aerospike/datadog_checks/aerospike/__about__.py index 78967d676d1af..59269a7bb5672 100644 --- a/aerospike/datadog_checks/aerospike/__about__.py +++ b/aerospike/datadog_checks/aerospike/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '4.0.0' +__version__ = '4.0.1' diff --git a/agent_requirements.in b/agent_requirements.in index b96253e8f081f..572dee1782565 100644 --- a/agent_requirements.in +++ b/agent_requirements.in @@ -13,6 +13,7 @@ confluent-kafka==2.6.1 cryptography==43.0.1 ddtrace==2.10.6 dnspython==2.6.1 +duckdb==1.1.1 foundationdb==6.3.24 hazelcast-python-client==5.4.0 importlib-metadata==2.1.3; python_version < '3.8' diff --git a/airflow/CHANGELOG.md b/airflow/CHANGELOG.md index 9dc1367093190..5e0f849f845d9 100644 --- a/airflow/CHANGELOG.md +++ b/airflow/CHANGELOG.md @@ -2,6 +2,12 @@ +## 6.2.0 / 2024-12-26 + +***Added***: + +* Use `start_date` instead of `execution_date` for ongoing duration metrics ([#19278](https://github.com/DataDog/integrations-core/pull/19278)) + ## 6.1.0 / 2024-10-04 / Agent 7.59.0 ***Fixed***: diff --git a/airflow/changelog.d/19278.added b/airflow/changelog.d/19278.added deleted file mode 100644 index 45bf7d91d95ab..0000000000000 --- a/airflow/changelog.d/19278.added +++ /dev/null @@ -1 +0,0 @@ -Use `start_date` instead of `execution_date` for ongoing duration metrics \ No newline at end of file diff --git a/airflow/datadog_checks/airflow/__about__.py b/airflow/datadog_checks/airflow/__about__.py index a1743c47e0d86..dc781a1f01ffc 100644 --- a/airflow/datadog_checks/airflow/__about__.py +++ b/airflow/datadog_checks/airflow/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 
2019 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '6.1.0' +__version__ = '6.2.0' diff --git a/cisco_aci/CHANGELOG.md b/cisco_aci/CHANGELOG.md index 550eef386c658..8586a4daa27e7 100644 --- a/cisco_aci/CHANGELOG.md +++ b/cisco_aci/CHANGELOG.md @@ -2,6 +2,16 @@ +## 4.2.0 / 2024-12-26 + +***Added***: + +* [NDM] [Cisco ACI] Support submitting topology metadata (utilizing LLDP neighbor information) ([#18675](https://github.com/DataDog/integrations-core/pull/18675)) + +***Fixed***: + +* [NDM] [Cisco ACI] Fix APIC device status ([#19204](https://github.com/DataDog/integrations-core/pull/19204)) + ## 4.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/cisco_aci/changelog.d/18675.added b/cisco_aci/changelog.d/18675.added deleted file mode 100644 index 72ee9491e1b34..0000000000000 --- a/cisco_aci/changelog.d/18675.added +++ /dev/null @@ -1 +0,0 @@ -[NDM] [Cisco ACI] Support submitting topology metadata (utilizing LLDP neighbor information) diff --git a/cisco_aci/changelog.d/19204.fixed b/cisco_aci/changelog.d/19204.fixed deleted file mode 100644 index 2ffc1d5bac760..0000000000000 --- a/cisco_aci/changelog.d/19204.fixed +++ /dev/null @@ -1 +0,0 @@ -[NDM] [Cisco ACI] Fix APIC device status diff --git a/cisco_aci/datadog_checks/cisco_aci/__about__.py b/cisco_aci/datadog_checks/cisco_aci/__about__.py index ccf6bd22bcecb..d396e637fa72e 100644 --- a/cisco_aci/datadog_checks/cisco_aci/__about__.py +++ b/cisco_aci/datadog_checks/cisco_aci/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "4.1.0" +__version__ = "4.2.0" diff --git a/datadog_checks_base/CHANGELOG.md b/datadog_checks_base/CHANGELOG.md index 03d73bac13c24..e70755f160925 100644 --- a/datadog_checks_base/CHANGELOG.md +++ b/datadog_checks_base/CHANGELOG.md @@ -2,6 +2,21 @@ +## 37.3.0 / 2024-12-26 + +***Security***: + +* Add FIPS switch ([#19179](https://github.com/DataDog/integrations-core/pull/19179)) + +***Added***: + +* Show diff to closest metric match when metric test fails ([#18975](https://github.com/DataDog/integrations-core/pull/18975)) +* Use `time_elapsed` metric type for process start time and last GC time metrics ([#19309](https://github.com/DataDog/integrations-core/pull/19309)) + +***Fixed***: + +* Fix "no snapshot data found" error when `agent check --profile-memory` ([#19197](https://github.com/DataDog/integrations-core/pull/19197)) + ## 37.2.0 / 2024-12-05 / Agent 7.60.0 ***Added***: diff --git a/datadog_checks_base/changelog.d/18975.added b/datadog_checks_base/changelog.d/18975.added deleted file mode 100644 index d95d103203c3e..0000000000000 --- a/datadog_checks_base/changelog.d/18975.added +++ /dev/null @@ -1 +0,0 @@ -Show diff to closest metric match when metric test fails diff --git a/datadog_checks_base/changelog.d/19197.fixed b/datadog_checks_base/changelog.d/19197.fixed deleted file mode 100644 index 31cd6530b5639..0000000000000 --- a/datadog_checks_base/changelog.d/19197.fixed +++ /dev/null @@ -1 +0,0 @@ -Fix "no snapshot data found" error when `agent check --profile-memory` diff --git a/datadog_checks_base/datadog_checks/base/__about__.py b/datadog_checks_base/datadog_checks/base/__about__.py index d2262989bc126..1d96edfc82e48 100644 --- a/datadog_checks_base/datadog_checks/base/__about__.py +++ b/datadog_checks_base/datadog_checks/base/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 
2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "37.2.0" +__version__ = "37.3.0" diff --git a/datadog_checks_base/datadog_checks/base/checks/base.py b/datadog_checks_base/datadog_checks/base/checks/base.py index 9508dcc518dd4..5b5bcfb386c0d 100644 --- a/datadog_checks_base/datadog_checks/base/checks/base.py +++ b/datadog_checks_base/datadog_checks/base/checks/base.py @@ -6,6 +6,7 @@ import importlib import inspect import logging +import os import re import traceback import unicodedata @@ -46,6 +47,7 @@ from ..utils.agent.utils import should_profile_memory from ..utils.common import ensure_bytes, to_native_string from ..utils.diagnose import Diagnosis +from ..utils.fips import enable_fips from ..utils.http import RequestsWrapper from ..utils.limiter import Limiter from ..utils.metadata import MetadataManager @@ -307,6 +309,9 @@ def __init__(self, *args, **kwargs): self.__formatted_tags = None self.__logs_enabled = None + if os.environ.get("GOFIPS", "0") == "1": + enable_fips() + def _create_metrics_pattern(self, metric_patterns, option_name): all_patterns = metric_patterns.get(option_name, []) diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/metrics.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/metrics.py index 45dfe4d07361c..71ece5169e100 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/metrics.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/metrics.py @@ -1,6 +1,7 @@ DEFAULT_GO_METRICS = { 'go_gc_duration_seconds': 'go.gc.duration.seconds', 'go_goroutines': 'go.goroutines', + 'go_memstats_alloc_bytes': {'name': 'go.memstats.alloc_bytes', 'type': 'native_dynamic'}, 'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash.sys_bytes', 'go_memstats_frees': 'go.memstats.frees', 'go_memstats_gc_cpu_fraction': 'go.memstats.gc.cpu_fraction', @@ -11,7 +12,10 @@ 'go_memstats_heap_objects': 'go.memstats.heap.objects', 'go_memstats_heap_released_bytes': 'go.memstats.heap.released_bytes', 'go_memstats_heap_sys_bytes': 'go.memstats.heap.sys_bytes', - 'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time.seconds', + 'go_memstats_last_gc_time_seconds': { + 'name': 'go.memstats.last_gc_time_seconds', + 'type': 'time_elapsed', + }, 'go_memstats_lookups': 'go.memstats.lookups', 'go_memstats_mallocs': 'go.memstats.mallocs', 'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache.inuse_bytes', @@ -28,7 +32,10 @@ 'process_max_fds': 'process.max_fds', 'process_open_fds': 'process.open_fds', 'process_resident_memory_bytes': 'process.resident_memory.bytes', - 'process_start_time_seconds': 'process.start_time.seconds', + 'process_start_time_seconds': { + 'name': 'process.start_time.seconds', + 'type': 'time_elapsed', + }, 'process_virtual_memory_bytes': 'process.virtual_memory.bytes', 'process_virtual_memory_max_bytes': 'process.virtual_memory.max_bytes', } diff --git a/datadog_checks_base/datadog_checks/base/utils/fips.py b/datadog_checks_base/datadog_checks/base/utils/fips.py new file mode 100644 index 0000000000000..36de6c1e3038c --- /dev/null +++ b/datadog_checks_base/datadog_checks/base/utils/fips.py @@ -0,0 +1,32 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import os + + +def enable_fips(path_to_openssl_conf=None, path_to_openssl_modules=None): + path_to_embedded = None + if os.getenv("OPENSSL_CONF") is None: + if path_to_openssl_conf is None: + path_to_embedded = _get_embedded_path() if path_to_embedded is None else path_to_embedded + path_to_openssl_conf = path_to_embedded / "ssl" / "openssl.cnf" + if not path_to_openssl_conf.exists(): + raise RuntimeError(f'The configuration file "{path_to_openssl_conf}" does not exist') + os.environ["OPENSSL_CONF"] = str(path_to_openssl_conf) + + if os.getenv("OPENSSL_MODULES") is None: + if path_to_openssl_modules is None: + path_to_embedded = _get_embedded_path() if path_to_embedded is None else path_to_embedded + path_to_openssl_modules = path_to_embedded / "lib" / "ossl-modules" + if not path_to_openssl_modules.exists(): + raise RuntimeError(f'The directory "{path_to_openssl_modules}" does not exist') + os.environ["OPENSSL_MODULES"] = str(path_to_openssl_modules) + + +def _get_embedded_path(): + import sys + from pathlib import Path + + embedded_dir = "embedded3" if os.name == 'nt' else "embedded" + return Path(sys.executable.split("embedded")[0] + embedded_dir) diff --git a/datadog_checks_base/tests/base/checks/test_agent_check.py b/datadog_checks_base/tests/base/checks/test_agent_check.py index 2f77b049a389e..931d4dab9315d 100644 --- a/datadog_checks_base/tests/base/checks/test_agent_check.py +++ b/datadog_checks_base/tests/base/checks/test_agent_check.py @@ -5,6 +5,7 @@ # Licensed under a 3-clause BSD style license (see LICENSE) import json import logging +import os from typing import Any # noqa: F401 import mock @@ -1293,3 +1294,19 @@ def test_detect_typos_configuration_models( assert "Detected potential typo in configuration option" not in caplog.text assert typos == set(unknown_options) + + +def test_env_var_logic_default(): + with mock.patch.dict('os.environ', {'GOFIPS': '0'}): + AgentCheck() + assert os.getenv('OPENSSL_CONF', None) is None + assert os.getenv('OPENSSL_MODULES', None) is None + + +def test_env_var_logic_preset(): + preset_conf = 'path/to/openssl.cnf' + preset_modules = 'path/to/ossl-modules' + with mock.patch.dict('os.environ', {'GOFIPS': '1', 'OPENSSL_CONF': preset_conf, 'OPENSSL_MODULES': preset_modules}): + AgentCheck() + assert os.getenv('OPENSSL_CONF', None) == preset_conf + assert os.getenv('OPENSSL_MODULES', None) == preset_modules diff --git a/datadog_cluster_agent/CHANGELOG.md b/datadog_cluster_agent/CHANGELOG.md index cc44ae90777ee..64c44c58bb5ea 100644 --- a/datadog_cluster_agent/CHANGELOG.md +++ b/datadog_cluster_agent/CHANGELOG.md @@ -2,6 +2,12 @@ +## 5.2.0 / 2024-12-26 + +***Added***: + +* add telemetry for local load store in dca ([#19229](https://github.com/DataDog/integrations-core/pull/19229)) + ## 5.1.0 / 2024-10-31 / Agent 7.60.0 ***Added***: diff --git a/datadog_cluster_agent/changelog.d/19229.added b/datadog_cluster_agent/changelog.d/19229.added deleted file mode 100644 index 96fec650c7fe8..0000000000000 --- a/datadog_cluster_agent/changelog.d/19229.added +++ /dev/null @@ -1 +0,0 @@ -add telemetry for local load store in dca diff --git a/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__about__.py b/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__about__.py index 414513f536ddb..c953bf2884887 100644 --- a/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__about__.py +++ 
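The `GOFIPS` hook added to `base.py` and `enable_fips()` above reduce to an env-var precedence rule: values of `OPENSSL_CONF` and `OPENSSL_MODULES` that are already set always win, and only missing ones are filled in from the Agent's embedded directory (the two tests above exercise exactly this). A minimal standalone sketch of that precedence rule; the function name and paths are illustrative, not part of this PR:

```python
import os


def resolve_openssl_env(embedded_root, env=None):
    # Mirror enable_fips(): fill in only the variables that are unset, so
    # operator-provided OPENSSL_* values always take precedence.
    env = os.environ if env is None else env
    return {
        "OPENSSL_CONF": env.get("OPENSSL_CONF", f"{embedded_root}/ssl/openssl.cnf"),
        "OPENSSL_MODULES": env.get("OPENSSL_MODULES", f"{embedded_root}/lib/ossl-modules"),
    }


# Unset variables fall back to the embedded layout...
print(resolve_openssl_env("/opt/datadog-agent/embedded", env={}))
# ...while preset values are left untouched, as in test_env_var_logic_preset.
print(resolve_openssl_env("/opt/datadog-agent/embedded", env={"OPENSSL_CONF": "/etc/ssl/my.cnf"}))
```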
b/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '5.1.0' +__version__ = '5.2.0' diff --git a/ddev/changelog.d/19179.security b/ddev/changelog.d/19179.security new file mode 100644 index 0000000000000..e2819d1fafecd --- /dev/null +++ b/ddev/changelog.d/19179.security @@ -0,0 +1 @@ +Add FIPS switch diff --git a/ddev/src/ddev/e2e/agent/docker.py b/ddev/src/ddev/e2e/agent/docker.py index 1821d50d1b346..206d4e2bbc1ee 100644 --- a/ddev/src/ddev/e2e/agent/docker.py +++ b/ddev/src/ddev/e2e/agent/docker.py @@ -113,7 +113,12 @@ def start(self, *, agent_build: str, local_packages: dict[Path, str], env_vars: if agent_build.startswith("datadog/"): # Add a potentially missing `py` suffix for default non-RC builds - if 'rc' not in agent_build and 'py' not in agent_build and not re.match(AGENT_VERSION_REGEX, agent_build): + if ( + 'rc' not in agent_build + and 'py' not in agent_build + and 'fips' not in agent_build + and not re.match(AGENT_VERSION_REGEX, agent_build) + ): agent_build = f'{agent_build}-py{self.python_version[0]}' if self.metadata.get('use_jmx') and not agent_build.endswith('-jmx'): diff --git a/dns_check/CHANGELOG.md b/dns_check/CHANGELOG.md index 13690f92ee70a..a5883d23c66c8 100644 --- a/dns_check/CHANGELOG.md +++ b/dns_check/CHANGELOG.md @@ -2,6 +2,12 @@ +## 5.0.1 / 2024-12-26 + +***Fixed***: + +* Move timing to be more precise with calculating response times ([#19276](https://github.com/DataDog/integrations-core/pull/19276)) + ## 5.0.0 / 2024-10-04 / Agent 7.59.0 ***Removed***: diff --git a/dns_check/datadog_checks/dns_check/__about__.py b/dns_check/datadog_checks/dns_check/__about__.py index d2601db5d5cc0..8c33f486fe48a 100644 --- a/dns_check/datadog_checks/dns_check/__about__.py +++ b/dns_check/datadog_checks/dns_check/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '5.0.0' +__version__ = '5.0.1' diff --git a/dns_check/datadog_checks/dns_check/dns_check.py b/dns_check/datadog_checks/dns_check/dns_check.py index e6c7598e23a9b..7b02dea7f5256 100644 --- a/dns_check/datadog_checks/dns_check/dns_check.py +++ b/dns_check/datadog_checks/dns_check/dns_check.py @@ -62,29 +62,30 @@ def _get_resolver(self): return resolver def check(self, _): - resolver = self._get_resolver() - # Perform the DNS query, and report its duration as a gauge - t0 = get_precise_time() + resolver = self._get_resolver() try: self.log.debug('Querying "%s" record for hostname "%s"...', self.record_type, self.hostname) if self.record_type == "NXDOMAIN": try: + t0 = get_precise_time() resolver.query(self.hostname) except dns.resolver.NXDOMAIN: - pass + # Timing here to get the time it takes for us to get the NXDOMAIN Exception + response_time = get_precise_time() - t0 else: raise AssertionError("Expected an NXDOMAIN, got a result.") else: + t0 = get_precise_time() answer = resolver.query(self.hostname, rdtype=self.record_type) # dns.resolver.Answer + response_time = get_precise_time() - t0 + assert any(it.to_text() for it in answer.rrset.items) if self.resolves_as_ips: self._check_answer(answer) - response_time = get_precise_time() - t0 - except dns.exception.Timeout: self.log.error('DNS resolution of %s timed out', self.hostname) self.report_as_service_check(AgentCheck.CRITICAL, 'DNS resolution of {} timed out'.format(self.hostname)) diff --git a/duckdb/CHANGELOG.md 
b/duckdb/CHANGELOG.md new file mode 100644 index 0000000000000..1a08d7960b6df --- /dev/null +++ b/duckdb/CHANGELOG.md @@ -0,0 +1,4 @@ +# CHANGELOG - DuckDB + + + diff --git a/duckdb/README.md b/duckdb/README.md new file mode 100644 index 0000000000000..e91e3cd5f2e05 --- /dev/null +++ b/duckdb/README.md @@ -0,0 +1,60 @@ +# Agent Check: DuckDB + +## Overview + +This check monitors [DuckDB][1] through the Datadog Agent. + +Include a high level overview of what this integration does: +- What does your product do (in 1-2 sentences)? +- What value will customers get from this integration, and why is it valuable to them? +- What specific data will your integration monitor, and what's the value of that data? + +## Setup + +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + +### Installation + +The DuckDB check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +1. Edit the `duckdb.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your duckdb performance data. See the [sample duckdb.d/conf.yaml][4] for all available configuration options. + +2. [Restart the Agent][5]. + +### Validation + +[Run the Agent's status subcommand][6] and look for `duckdb` under the Checks section. + +## Data Collected + +### Metrics + +See [metadata.csv][7] for a list of metrics provided by this integration. + +### Events + +The DuckDB integration does not include any events. + +### Service Checks + +The DuckDB integration does not include any service checks. + +See [service_checks.json][8] for a list of service checks provided by this integration. + +## Troubleshooting + +Need help? Contact [Datadog support][9]. + + +[1]: **LINK_TO_INTEGRATION_SITE** +[2]: https://app.datadoghq.com/account/settings/agent/latest +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/duckdb/datadog_checks/duckdb/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/duckdb/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/duckdb/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/duckdb/assets/configuration/spec.yaml b/duckdb/assets/configuration/spec.yaml new file mode 100644 index 0000000000000..b7c19b5c4b2e5 --- /dev/null +++ b/duckdb/assets/configuration/spec.yaml @@ -0,0 +1,34 @@ +name: DuckDB +files: +- name: duckdb.yaml + options: + - template: init_config + options: + - template: init_config/default + - template: instances + options: + - name: db_name + required: true + description: | + The database to connect to (file path). + NOTE: DuckDB databases are stored as files. 
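Because the database is just a file on disk, the check's data source can be exercised directly with the `duckdb` Python package this PR pins (`duckdb==1.1.1`). A hedged sketch of what the check does under the hood, opening the file read-only and reading from `duckdb_settings()`; the file path is illustrative:

```python
import duckdb

# Open the database file in read-only mode, as the check does, so the
# connection does not take a write lock away from the application.
conn = duckdb.connect("/path-to-file/my_database.db", read_only=True)
try:
    rows = conn.execute(
        "SELECT name, value FROM duckdb_settings() WHERE name = 'worker_threads';"
    ).fetchall()
    print(rows)  # e.g. [('worker_threads', '8')]
finally:
    conn.close()
```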
+ value: + example: "/path-to-file/my_database.db" + type: string + - name: connection_attempt + description: | + The number of retries to connect to the database in case of failure. + value: + type: integer + default: 3 + display_default: 3 + - template: instances/default + overrides: + min_collection_interval.description: | + This changes the collection interval of this check to avoid + the risk of locking the database file. + If your database file is heavily used in write mode, you can + increase this value further to reduce how often the check accesses the file. + min_collection_interval.value.default: 60 + min_collection_interval.value.example: 60 + min_collection_interval.enabled: true diff --git a/duckdb/assets/dashboards/duckdb_overview.json b/duckdb/assets/dashboards/duckdb_overview.json new file mode 100644 index 0000000000000..96e155112c011 --- /dev/null +++ b/duckdb/assets/dashboards/duckdb_overview.json @@ -0,0 +1,29 @@ +{ + "title": "DuckDB Overview", + "description": "[[suggested_dashboards]]", + "widgets": [ + { + "id": 6432334130190000, + "definition": { + "type": "image", + "url": "https://static.datadoghq.com/static/images/logos/duckdb_small.svg", + "sizing": "contain", + "margin": "md", + "has_background": false, + "has_border": false, + "vertical_align": "center", + "horizontal_align": "center" + }, + "layout": { + "x": 0, + "y": 0, + "width": 2, + "height": 2 + } + } + ], + "template_variables": [], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/duckdb/assets/service_checks.json b/duckdb/assets/service_checks.json new file mode 100644 index 0000000000000..6c87723be2c74 --- /dev/null +++ b/duckdb/assets/service_checks.json @@ -0,0 +1,2 @@ + +[] diff --git a/quarkus/changelog.d/19196.added b/duckdb/changelog.d/1.added similarity index 100% rename from quarkus/changelog.d/19196.added rename to duckdb/changelog.d/1.added diff --git a/duckdb/datadog_checks/__init__.py b/duckdb/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/duckdb/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/duckdb/datadog_checks/duckdb/__about__.py b/duckdb/datadog_checks/duckdb/__about__.py new file mode 100644 index 0000000000000..e9541ce83e9e5 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '0.0.1' diff --git a/duckdb/datadog_checks/duckdb/__init__.py b/duckdb/datadog_checks/duckdb/__init__.py new file mode 100644 index 0000000000000..f23ef58f4ed30 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import DuckdbCheck + +__all__ = ['__version__', 'DuckdbCheck'] diff --git a/duckdb/datadog_checks/duckdb/check.py b/duckdb/datadog_checks/duckdb/check.py new file mode 100644 index 0000000000000..2fb746bb40c53 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/check.py @@ -0,0 +1,166 @@ +# (C) Datadog, Inc.
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import json +import os +import re +import time +from contextlib import closing, contextmanager +from copy import deepcopy + +import duckdb + +from datadog_checks.base import AgentCheck +from datadog_checks.base.utils.db import QueryManager + +from .queries import DEFAULT_QUERIES + + +class DuckdbCheck(AgentCheck): + + # This will be the prefix of every metric and service check the integration sends + __NAMESPACE__ = 'duckdb' + + def __init__(self, name, init_config, instances): + super(DuckdbCheck, self).__init__(name, init_config, instances) + + self.db_name = self.instance.get('db_name') + self.connection_attempt = int(self.instance.get('connection_attempt', 3)) + + self.tags = self.instance.get('tags', []) + self._connection = None + self._connect_params = None + self._tags = [] + self._query_errors = 0 + + manager_queries = deepcopy(DEFAULT_QUERIES) + + self._query_manager = QueryManager( + self, + self._execute_query_raw, + queries=manager_queries, + tags=self.tags, + error_handler=self._executor_error_handler, + ) + self.check_initializations.append(self.initialize_config) + self.check_initializations.append(self._query_manager.compile_queries) + + def check(self, _): + retry_delay = 5 + max_retries = self.connection_attempt + for attempt in range(1, max_retries + 1): + try: + with self.connect() as conn: + if conn: + self._connection = conn + self._query_manager.execute() + break + except Exception as e: + self.log.warning('Unable to connect to the database: "%s", retrying...', e) + if attempt < max_retries: + time.sleep(retry_delay) + else: + self.log.error('Max connection retries reached') + + def _execute_query_raw(self, query): + with closing(self._connection.cursor()) as cursor: + query = query.format(self.db_name) + rows = cursor.execute(query).fetchall() + if not rows: + self._query_errors += 1 + self.log.warning('Failed to fetch records from query: `%s`.', query) + return None + for row in rows: + # Try to find the field name or version from the query, anything else would fail + pattern_version = r"\bversion\b" + query_version = re.search(pattern_version, query) + if query_version: + query_name = 'version' + else: + pattern = r"(?i)\bname\s*=\s*'([^']+)'" + query_name = re.search(pattern, query).group(1) + try: + yield self._queries_processor(row, query_name) + except Exception as e: + self.log.debug( + 'Unable to process row returned from query "%s", skipping row %s. %s', query_name, row, e + ) + yield row + + def _queries_processor(self, row, query_name): + unprocessed_row = row + # Return database version + if query_name == 'version': + self.submit_version(row) + return unprocessed_row + + self.log.debug('Row processor returned: %s. \nFrom query: "%s"', unprocessed_row, query_name) + return unprocessed_row + + @contextmanager + def connect(self): + conn = None + # Only attempt connection if the Database file exists + if os.path.exists(self.db_name): + try: + # Try to establish the connection in read only mode + conn = duckdb.connect(self.db_name, read_only=True) + self.log.info('Connected to DuckDB database.') + yield conn + except Exception as e: + if 'Conflicting lock' in str(e): + self.log.error('Lock conflict detected') + else: + self.log.error('Unable to connect to DuckDB database. 
%s.', e) + finally: + if conn: + conn.close() + else: + self.log.error('Database file not found') + + def initialize_config(self): + self._connect_params = json.dumps( + { + 'db_name': self.db_name, + } + ) + global_tags = [ + 'db_name:{}'.format(self.instance.get('db_name')), + ] + if self.tags is not None: + global_tags.extend(self.tags) + self._tags = global_tags + self._query_manager.tags = self._tags + + @AgentCheck.metadata_entrypoint + def submit_version(self, row): + """ + Example version: v1.1.1 + """ + try: + duckdb_version_row = row[0] + duckdb_version = duckdb_version_row[1:] + version_split = duckdb_version.split('.') + + if len(version_split) >= 3: + major = version_split[0] + minor = version_split[1] + patch = version_split[2] + + version_raw = f'{major}.{minor}.{patch}' + + version_parts = { + 'major': major, + 'minor': minor, + 'patch': patch, + } + self.set_metadata('version', version_raw, scheme='parts', final_scheme='semver', part_map=version_parts) + else: + self.log.debug("Malformed DuckDB version format: %s", duckdb_version_row) + except Exception as e: + self.log.warning("Could not retrieve version metadata: %s", e) + + def _executor_error_handler(self, error): + self.log.debug('Error from query "%s"', error) + self._query_errors += 1 + return error diff --git a/duckdb/datadog_checks/duckdb/config_models/__init__.py b/duckdb/datadog_checks/duckdb/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/duckdb/datadog_checks/duckdb/config_models/defaults.py b/duckdb/datadog_checks/duckdb/config_models/defaults.py new file mode 100644 index 0000000000000..c99e7d0251838 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/config_models/defaults.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def instance_connection_attempt(): + return 3 + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_min_collection_interval(): + return 60 diff --git a/duckdb/datadog_checks/duckdb/config_models/instance.py b/duckdb/datadog_checks/duckdb/config_models/instance.py new file mode 100644 index 0000000000000..7e183b58aad41 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/config_models/instance.py @@ -0,0 +1,63 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. 
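`submit_version()` above expects the raw string from `SELECT version();` to look like `v1.1.1` and splits it into semver parts for `set_metadata()`. A standalone sketch of just that parsing step; the sample value matches the one used in the integration's own tests:

```python
# As returned by: SELECT version();
raw = 'v1.1.1'

# Strip the leading 'v' and split into the parts that submit_version()
# maps onto the Agent's 'semver' metadata scheme.
version = raw[1:]
major, minor, patch = version.split('.')[:3]
part_map = {'major': major, 'minor': minor, 'patch': patch}
print(version, part_map)  # 1.1.1 {'major': '1', 'minor': '1', 'patch': '1'}
```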
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + connection_attempt: Optional[int] = None + db_name: str + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + min_collection_interval: Optional[float] = None + service: Optional[str] = None + tags: Optional[tuple[str, ...]] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/duckdb/datadog_checks/duckdb/config_models/shared.py b/duckdb/datadog_checks/duckdb/config_models/shared.py new file mode 100644 index 0000000000000..e39d447dfc4b9 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/config_models/shared.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import validators + + +class SharedConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + service: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field) + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_shared', identity)(model)) diff --git a/duckdb/datadog_checks/duckdb/config_models/validators.py b/duckdb/datadog_checks/duckdb/config_models/validators.py new file mode 100644 index 0000000000000..70150e85e6124 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/config_models/validators.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# Here you can include additional config validators or transformers +# +# def initialize_instance(values, **kwargs): +# if 'my_option' not in values and 'my_legacy_option' in values: +# values['my_option'] = values['my_legacy_option'] +# if values.get('my_number') > 10: +# raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number'))) +# +# return values diff --git a/duckdb/datadog_checks/duckdb/data/conf.yaml.example b/duckdb/datadog_checks/duckdb/data/conf.yaml.example new file mode 100644 index 0000000000000..86f32b93c5534 --- /dev/null +++ b/duckdb/datadog_checks/duckdb/data/conf.yaml.example @@ -0,0 +1,67 @@ +## All options defined here are available to all instances. +# +init_config: + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Additionally, this sets the default `service` for every log source. + # + # service: + +## Every instance is scheduled independently of the others. +# +instances: + + ## @param db_name - string - required + ## The database to connect to (file path). + ## NOTE: DuckDB databases are stored as files. + # + - db_name: /path-to-file/my_database.db + + ## @param connection_attempt - integer - optional - default: 3 + ## The number of retries to connect to the database in case of failure. + # + # connection_attempt: + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 60 + ## This changes the collection interval of this check to avoid + ## the risk of locking the database file. + ## If your database file is heavily used in write mode, you can + ## increase this value further to reduce how often the check accesses the file.
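That locking concern is also why the check pairs a read-only connection with the `connection_attempt` retry budget. A standalone sketch of the retry loop from `DuckdbCheck.check()`, simplified; the path and delay are illustrative:

```python
import time

import duckdb


def connect_with_retries(db_path, attempts=3, delay=5):
    # Mirror DuckdbCheck.check(): open read-only, back off on failure
    # (for example, when a writer holds a conflicting lock), and give up
    # after the configured number of attempts.
    for attempt in range(1, attempts + 1):
        try:
            return duckdb.connect(db_path, read_only=True)
        except Exception:
            if attempt == attempts:
                raise
            time.sleep(delay)


conn = connect_with_retries('/path-to-file/my_database.db')
```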
+ # + min_collection_interval: 60 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. + # + # empty_default_hostname: false + + ## @param metric_patterns - mapping - optional + ## A mapping of metrics to include or exclude, with each entry being a regular expression. + ## + ## Metrics defined in `exclude` will take precedence in case of overlap. + # + # metric_patterns: + # include: + # - + # exclude: + # - diff --git a/duckdb/datadog_checks/duckdb/queries.py b/duckdb/datadog_checks/duckdb/queries.py new file mode 100644 index 0000000000000..e09030edbb3be --- /dev/null +++ b/duckdb/datadog_checks/duckdb/queries.py @@ -0,0 +1,68 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + + +DUCKDB_VERSION = { + 'name': 'version', + 'query': "SELECT version();", + 'columns': [{'name': 'version', 'type': 'source'}], +} + +DUCKDB_WAL = { + 'name': 'wal_autocheckpoint', + 'query': " SELECT CAST(SUBSTR(value, 1, LENGTH(value) - 3) AS BIGINT) * " + "CASE " + " WHEN RIGHT(value, 3) = 'KiB' THEN 1024 " + " WHEN RIGHT(value, 3) = 'MiB' THEN 1024 * 1024 " + " WHEN RIGHT(value, 3) = 'GiB' THEN 1024 * 1024 * 1024 " + " WHEN RIGHT(value, 3) = 'TiB' THEN 1024 * 1024 * 1024 * 1024 " + " ELSE 1 " + " END AS value_in_bytes FROM duckdb_settings() WHERE name = 'wal_autocheckpoint';", + 'columns': [{'name': 'wal_autocheckpoint', 'type': 'gauge'}], +} + + +DUCKDB_THREADS = { + 'name': 'worker_threads', + 'query': "SELECT value FROM duckdb_settings() WHERE name = 'worker_threads';", + 'columns': [{'name': 'worker_threads', 'type': 'gauge'}], +} + + +DUCKDB_MEMORY_LIMIT = { + 'name': 'memory_limit', + 'query': " SELECT CAST(SUBSTR(value, 1, LENGTH(value) - 3) AS BIGINT) * " + "CASE " + " WHEN RIGHT(value, 3) = 'KiB' THEN 1024 " + " WHEN RIGHT(value, 3) = 'MiB' THEN 1024 * 1024 " + " WHEN RIGHT(value, 3) = 'GiB' THEN 1024 * 1024 * 1024 " + " WHEN RIGHT(value, 3) = 'TiB' THEN 1024 * 1024 * 1024 * 1024 " + " ELSE 1 " + " END AS value_in_bytes FROM duckdb_settings() WHERE name = 'memory_limit';", + 'columns': [{'name': 'memory_limit', 'type': 'gauge'}], +} + + +DUCKDB_PART_WRITE_FLUSH_THRESHOLD = { + 'name': 'partitioned_write_flush_threshold', + 'query': " SELECT CAST(value AS INTEGER) AS value_as_integer " + " FROM duckdb_settings() WHERE name = 'partitioned_write_flush_threshold';", + 'columns': [{'name': 'partitioned_write_flush_threshold', 'type': 'gauge'}], +} + +DUCKDB_PART_WRITE_MAX_OPEN_FILES = { + 'name': 'partitioned_write_max_open_files', + 'query': " SELECT CAST(value AS INTEGER) AS value_as_integer " + " FROM duckdb_settings() WHERE name = 'partitioned_write_max_open_files';", + 'columns': [{'name': 'partitioned_write_max_open_files', 'type': 'gauge'}], +} + +DEFAULT_QUERIES = [ + DUCKDB_VERSION, + DUCKDB_THREADS, + DUCKDB_WAL, + DUCKDB_MEMORY_LIMIT, + DUCKDB_PART_WRITE_FLUSH_THRESHOLD, + DUCKDB_PART_WRITE_MAX_OPEN_FILES, +] diff --git a/duckdb/hatch.toml b/duckdb/hatch.toml new file mode 100644 index 0000000000000..c79a82d22f2c5 --- /dev/null +++ b/duckdb/hatch.toml @@ -0,0 +1,8 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] +version = ["1.1.1"] + +[envs.default.overrides] +matrix.version.env-vars = "DUCKDB_VERSION" \ No newline at end of file diff --git a/duckdb/images/IMAGES_README.md b/duckdb/images/IMAGES_README.md new file mode 100644 index
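The `wal_autocheckpoint` and `memory_limit` queries above do their unit conversion in SQL: strip the three-character suffix, then scale by the matching power of 1024. A hedged Python equivalent for sanity-checking that arithmetic; it assumes values shaped like `16KiB`, per the query's `SUBSTR`/`RIGHT` logic (note that the SQL's `ELSE 1` branch still strips three characters, which only matters if a setting ever lacks a unit suffix):

```python
def suffix_to_bytes(value):
    # Python analogue of the SQL CASE expression used in queries.py.
    units = {'KiB': 1024, 'MiB': 1024**2, 'GiB': 1024**3, 'TiB': 1024**4}
    return int(value[:-3]) * units[value[-3:]]


assert suffix_to_bytes('16KiB') == 16 * 1024
assert suffix_to_bytes('1GiB') == 1024**3
```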
0000000000000..443f3c45e3385 --- /dev/null +++ b/duckdb/images/IMAGES_README.md @@ -0,0 +1,41 @@ +# Marketplace Media Carousel Guidelines + +## Using the media gallery + +Please upload images to use the media gallery. Integrations require a minimum of 3 images. Images should highlight your product, your integration, and a full image of the Datadog integration dashboard. The gallery +can hold a maximum of 8 pieces of media total, and one of these pieces of media +can be a video (guidelines and submission steps below). Images should be +added to your /images directory and referenced in the manifest.json file. + + +## Image and video requirements + +### Images + +``` +File type : .jpg or .png +File size : ~500 KB per image, with a max of 1 MB per image +File dimensions : The image must be between 1440px and 2880px width, with a 16:9 aspect ratio (for example: 1440x810) +File name : Use only letters, numbers, underscores, and hyphens +Color mode : RGB +Color profile : sRGB +Description : 300 characters maximum +``` + +### Video + +To display a video in your media gallery, please send our team the zipped file +or a link to download the video at `marketplace@datadog.com`. In addition, +please upload a thumbnail image for your video as a part of the pull request. +Once approved, we will upload the file to Vimeo and provide you with the +vimeo_id to add to your manifest.json file. Please note that the gallery can +only hold one video. + +``` +File type : MP4 H.264 +File size : Max 1 video; 1 GB maximum size +File dimensions : The aspect ratio must be exactly 16:9, and the resolution must be 1920x1080 or higher +File name : partnerName-appName.mp4 +Run time : Recommendation of 60 seconds or less +Description : 300 characters maximum +``` diff --git a/duckdb/manifest.json b/duckdb/manifest.json new file mode 100644 index 0000000000000..268dffb4db54a --- /dev/null +++ b/duckdb/manifest.json @@ -0,0 +1,53 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "a905fbe6-135f-4189-b027-4bdc58e51e29", + "app_id": "duckdb", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Integration for DuckDB", + "title": "DuckDB", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::Metrics", + "Offering::Integration", + "Submitted Data Type::Metrics" + ] + }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 28902456, + "source_type_name": "DuckDB", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "duckdb.", + "check": "duckdb.memory_limit", + "metadata_path": "metadata.csv" + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Duckdb Overview": "assets/dashboards/duckdb_overview.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/duckdb/metadata.csv b/duckdb/metadata.csv new file mode 100644 index 0000000000000..0704022a503ab --- /dev/null +++ b/duckdb/metadata.csv @@ -0,0 +1,6 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +duckdb.memory_limit,gauge,,byte,,The maximum memory of the system.,0,duckdb,,, 
+duckdb.partitioned_write_flush_threshold,gauge,,,,The threshold in number of rows after which we flush a thread state when writing using PARTITION_BY.,0,duckdb,,, +duckdb.partitioned_write_max_open_files,gauge,,,,The maximum amount of files the system can keep open before flushing to disk when writing using PARTITION_BY.,0,duckdb,,, +duckdb.wal_autocheckpoint,gauge,,byte,,The WAL size threshold at which to automatically trigger a checkpoint.,0,duckdb,,, +duckdb.worker_threads,gauge,,,,The number of total threads used by the system.,0,duckdb,,, diff --git a/duckdb/pyproject.toml b/duckdb/pyproject.toml new file mode 100644 index 0000000000000..801b00ea69063 --- /dev/null +++ b/duckdb/pyproject.toml @@ -0,0 +1,62 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-duckdb" +description = "The DuckDB check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.12" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "duckdb", +] +authors = [ + { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=37.0.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [ + "duckdb==1.1.1", +] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/duckdb/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/duckdb", +] +dev-mode-dirs = [ + ".", +] diff --git a/duckdb/tests/__init__.py b/duckdb/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/duckdb/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/duckdb/tests/common.py b/duckdb/tests/common.py new file mode 100644 index 0000000000000..7fa9e7a75b9a5 --- /dev/null +++ b/duckdb/tests/common.py @@ -0,0 +1,23 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import os + +from datadog_checks.dev import get_here + +HERE = get_here() +DB_NAME = 'data/sample.db' +WRONG_DB_NAME = 'test.db' + +DB = os.path.join(HERE, DB_NAME) + +DEFAULT_INSTANCE = {'db_name': DB} +WRONG_INSTANCE = {'db_name': WRONG_DB_NAME} + +METRICS_MAP = [ + 'duckdb.worker_threads', + 'duckdb.wal_autocheckpoint', + 'duckdb.memory_limit', + 'duckdb.partitioned_write_flush_threshold', + 'duckdb.partitioned_write_max_open_files', +] diff --git a/duckdb/tests/conftest.py b/duckdb/tests/conftest.py new file mode 100644 index 0000000000000..9fbf7220d4ef2 --- /dev/null +++ b/duckdb/tests/conftest.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from copy import deepcopy + +import pytest + +from . 
import common + + +@pytest.fixture(scope='session') +def dd_environment(): + yield common.DEFAULT_INSTANCE + + +@pytest.fixture +def instance(): + return deepcopy(common.DEFAULT_INSTANCE) diff --git a/duckdb/tests/data/sample.db b/duckdb/tests/data/sample.db new file mode 100644 index 0000000000000..021514090f2e9 Binary files /dev/null and b/duckdb/tests/data/sample.db differ diff --git a/duckdb/tests/test_e2e.py b/duckdb/tests/test_e2e.py new file mode 100644 index 0000000000000..c9eab0ad6ab45 --- /dev/null +++ b/duckdb/tests/test_e2e.py @@ -0,0 +1,10 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + + +@pytest.mark.e2e +def test_e2e(dd_agent_check): + aggregator = dd_agent_check() + aggregator.assert_all_metrics_covered() diff --git a/duckdb/tests/test_integration.py b/duckdb/tests/test_integration.py new file mode 100644 index 0000000000000..c5d96a1c15118 --- /dev/null +++ b/duckdb/tests/test_integration.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import logging + +from datadog_checks.duckdb import DuckdbCheck + +from . import common + + +def test_check(dd_run_check, aggregator, instance): + instance = common.DEFAULT_INSTANCE + check = DuckdbCheck('duckdb', {}, [instance]) + dd_run_check(check) + + for metric in common.METRICS_MAP: + aggregator.assert_metric(metric) + + +def test_failed_connection(dd_run_check, instance, caplog): + caplog.set_level(logging.ERROR) + instance = common.WRONG_INSTANCE + check = DuckdbCheck('duckdb', {}, [instance]) + dd_run_check(check) + + expected_error = "Database file not found" + assert expected_error in caplog.text + + +def test_version(dd_run_check, instance, datadog_agent): + instance = common.DEFAULT_INSTANCE + check = DuckdbCheck('duckdb', {}, [instance]) + check.check_id = 'test:123' + raw_version = '1.1.1' + major, minor, patch = raw_version.split('.') + version_metadata = { + 'version.scheme': 'semver', + 'version.major': major, + 'version.minor': minor, + 'version.patch': patch, + 'version.raw': raw_version, + } + dd_run_check(check) + + datadog_agent.assert_metadata('test:123', version_metadata) diff --git a/duckdb/tests/test_unit.py b/duckdb/tests/test_unit.py new file mode 100644 index 0000000000000..623d1d0e5e51f --- /dev/null +++ b/duckdb/tests/test_unit.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import pytest + +from datadog_checks.base import AgentCheck # noqa: F401 +from datadog_checks.base.stubs.aggregator import AggregatorStub # noqa: F401 +from datadog_checks.duckdb import DuckdbCheck + + +def test_empty_instance(dd_run_check): + with pytest.raises( + Exception, + match='InstanceConfig`:\ndb_name\n Field required', + ): + check = DuckdbCheck('duckdb', {}, [{}]) + dd_run_check(check) diff --git a/keda/CHANGELOG.md b/keda/CHANGELOG.md new file mode 100644 index 0000000000000..4882ed5d592a7 --- /dev/null +++ b/keda/CHANGELOG.md @@ -0,0 +1,9 @@ +# CHANGELOG - Keda + + + +## 1.0.0 / 2024-12-26 + +***Added***: + +* Initial Release ([#19303](https://github.com/DataDog/integrations-core/pull/19303)) diff --git a/keda/README.md b/keda/README.md new file mode 100644 index 0000000000000..0577faf7a08d8 --- /dev/null +++ b/keda/README.md @@ -0,0 +1,60 @@ +# Agent Check: Keda + +## Overview + +This check monitors [Keda][1] through the Datadog Agent. 
+
+## Setup
+
+Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions.
+
+### Installation
+
+The Keda check is included in the [Datadog Agent][2] package.
+No additional installation is needed on your server.
+
+### Configuration
+
+1. Edit the `keda.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory, to start collecting your Keda performance data. See the [sample keda.d/conf.yaml][4] for all available configuration options.
+
+2. [Restart the Agent][5].
+
+### Validation
+
+[Run the Agent's status subcommand][6] and look for `keda` under the Checks section.
+
+## Data Collected
+
+### Metrics
+
+See [metadata.csv][7] for a list of metrics provided by this integration.
+
+### Events
+
+The Keda integration does not include any events.
+
+### Service Checks
+
+See [service_checks.json][8] for a list of service checks provided by this integration.
+
+## Troubleshooting
+
+Need help? Contact [Datadog support][9].
+
+
+[1]: https://keda.sh
+[2]: https://app.datadoghq.com/account/settings/agent/latest
+[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/
+[4]: https://github.com/DataDog/integrations-core/blob/master/keda/datadog_checks/keda/data/conf.yaml.example
+[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
+[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information
+[7]: https://github.com/DataDog/integrations-core/blob/master/keda/metadata.csv
+[8]: https://github.com/DataDog/integrations-core/blob/master/keda/assets/service_checks.json
+[9]: https://docs.datadoghq.com/help/
diff --git a/keda/assets/configuration/spec.yaml b/keda/assets/configuration/spec.yaml
new file mode 100644
index 0000000000000..e12e7436cd9ed
--- /dev/null
+++ b/keda/assets/configuration/spec.yaml
@@ -0,0 +1,10 @@
+name: Keda
+files:
+- name: keda.yaml
+  options:
+  - template: init_config
+    options:
+    - template: init_config/default
+  - template: instances
+    options:
+    - template: instances/default
diff --git a/keda/assets/dashboards/keda_overview.json b/keda/assets/dashboards/keda_overview.json
new file mode 100644
index 0000000000000..5b8e456397e3a
--- /dev/null
+++ b/keda/assets/dashboards/keda_overview.json
@@ -0,0 +1,77 @@
+{
+  "title": "Keda Overview",
+  "description": "## Keda\n",
+  "widgets": [
+    {
+      "id": 4717263751542750,
+      "definition": {
+        "title": "",
+        "banner_img": "/static/images/logos/keda_large.svg",
+        "show_title": true,
+        "type": "group",
+        "layout_type": "ordered",
+        "widgets": [
+          {
+            "id": 5685022835071772,
+            "definition": {
+              "type": "note",
+              "content": "## Keda\n",
+              "background_color": "white",
+              "font_size": "14",
+              "text_align": "left",
+              "vertical_align": "center",
+              "show_tick": false,
+              "tick_pos": "50%",
+              "tick_edge": "left",
+              "has_padding": true
+            },
+            "layout": {
+              "x": 0,
+              "y": 0,
+              "width": 3,
+              "height": 3
+            }
+          },
+          {
+            "id": 8921963557059570,
+            "definition": {
+              "type": "note",
+              "content": "",
+              "background_color": "white",
"font_size": "14", + "text_align": "center", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 6 + } + } + ], + "template_variables": [ + { + "name": "host", + "prefix": "host", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/keda/assets/service_checks.json b/keda/assets/service_checks.json new file mode 100644 index 0000000000000..3409a32a68141 --- /dev/null +++ b/keda/assets/service_checks.json @@ -0,0 +1,17 @@ +[ + { + "agent_version": "7.62.0", + "integration": "Keda", + "check": "keda.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Keda OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Keda OpenMetrics endpoint, otherwise returns `OK`." + } +] \ No newline at end of file diff --git a/keda/datadog_checks/__init__.py b/keda/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/keda/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/keda/datadog_checks/keda/__about__.py b/keda/datadog_checks/keda/__about__.py new file mode 100644 index 0000000000000..acbfd1c866b84 --- /dev/null +++ b/keda/datadog_checks/keda/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '1.0.0' diff --git a/keda/datadog_checks/keda/__init__.py b/keda/datadog_checks/keda/__init__.py new file mode 100644 index 0000000000000..5c1f9b950e5e6 --- /dev/null +++ b/keda/datadog_checks/keda/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import KedaCheck + +__all__ = ['__version__', 'KedaCheck'] diff --git a/keda/datadog_checks/keda/check.py b/keda/datadog_checks/keda/check.py new file mode 100644 index 0000000000000..e511c429a1d43 --- /dev/null +++ b/keda/datadog_checks/keda/check.py @@ -0,0 +1,26 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from datadog_checks.base import OpenMetricsBaseCheckV2 + +from .metrics import METRIC_MAP, RENAME_LABELS_MAP + + +class KedaCheck(OpenMetricsBaseCheckV2): + DEFAULT_METRIC_LIMIT = 0 + __NAMESPACE__ = 'keda' + + def __init__(self, name, init_config, instances=None): + + super(KedaCheck, self).__init__( + name, + init_config, + instances, + ) + + def get_default_config(self): + return { + 'metrics': [METRIC_MAP], + "rename_labels": RENAME_LABELS_MAP, + } diff --git a/keda/datadog_checks/keda/config_models/__init__.py b/keda/datadog_checks/keda/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/keda/datadog_checks/keda/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. 
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/keda/datadog_checks/keda/config_models/defaults.py b/keda/datadog_checks/keda/config_models/defaults.py new file mode 100644 index 0000000000000..4d46152df5d40 --- /dev/null +++ b/keda/datadog_checks/keda/config_models/defaults.py @@ -0,0 +1,20 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_min_collection_interval(): + return 15 diff --git a/keda/datadog_checks/keda/config_models/instance.py b/keda/datadog_checks/keda/config_models/instance.py new file mode 100644 index 0000000000000..56acae21ba432 --- /dev/null +++ b/keda/datadog_checks/keda/config_models/instance.py @@ -0,0 +1,61 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import defaults, validators + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + min_collection_interval: Optional[float] = None + service: Optional[str] = None + tags: Optional[tuple[str, ...]] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/keda/datadog_checks/keda/config_models/shared.py b/keda/datadog_checks/keda/config_models/shared.py new file mode 100644 index 0000000000000..e39d447dfc4b9 --- /dev/null +++ b/keda/datadog_checks/keda/config_models/shared.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import validators + + +class SharedConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + service: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field) + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_shared', identity)(model)) diff --git a/keda/datadog_checks/keda/config_models/validators.py b/keda/datadog_checks/keda/config_models/validators.py new file mode 100644 index 0000000000000..70150e85e6124 --- /dev/null +++ b/keda/datadog_checks/keda/config_models/validators.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 
2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# Here you can include additional config validators or transformers
+#
+# def initialize_instance(values, **kwargs):
+#     if 'my_option' not in values and 'my_legacy_option' in values:
+#         values['my_option'] = values['my_legacy_option']
+#     if values.get('my_number') > 10:
+#         raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number')))
+#
+#     return values
diff --git a/keda/datadog_checks/keda/data/conf.yaml.example b/keda/datadog_checks/keda/data/conf.yaml.example
new file mode 100644
index 0000000000000..57b46cc14ac44
--- /dev/null
+++ b/keda/datadog_checks/keda/data/conf.yaml.example
@@ -0,0 +1,55 @@
+## All options defined here are available to all instances.
+#
+init_config:
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Additionally, this sets the default `service` for every log source.
+    #
+    # service: <SERVICE>
+
+## Every instance is scheduled independently of the others.
+#
+instances:
+
+  -
+    ## @param tags - list of strings - optional
+    ## A list of tags to attach to every metric and service check emitted by this instance.
+    ##
+    ## Learn more about tagging at https://docs.datadoghq.com/tagging
+    #
+    # tags:
+    #   - <KEY_1>:<VALUE_1>
+    #   - <KEY_2>:<VALUE_2>
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Overrides any `service` defined in the `init_config` section.
+    #
+    # service: <SERVICE>
+
+    ## @param min_collection_interval - number - optional - default: 15
+    ## This changes the collection interval of the check. For more information, see:
+    ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval
+    #
+    # min_collection_interval: 15
+
+    ## @param empty_default_hostname - boolean - optional - default: false
+    ## This forces the check to send metrics with no hostname.
+    ##
+    ## This is useful for cluster-level checks.
+    #
+    # empty_default_hostname: false
+
+    ## @param metric_patterns - mapping - optional
+    ## A mapping of metrics to include or exclude, with each entry being a regular expression.
+    ##
+    ## Metrics defined in `exclude` will take precedence in case of overlap.
+    #
+    # metric_patterns:
+    #   include:
+    #   - <INCLUDE_REGEX>
+    #   exclude:
+    #   - <EXCLUDE_REGEX>
diff --git a/keda/datadog_checks/keda/metrics.py b/keda/datadog_checks/keda/metrics.py
new file mode 100644
index 0000000000000..0883b2b390946
--- /dev/null
+++ b/keda/datadog_checks/keda/metrics.py
@@ -0,0 +1,145 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# Some metric mappings are too long.
This turns off the 120 line limit for this file: +# ruff: noqa: E501 + + +METRIC_MAP = { + # Operator + "keda_build_info": "build_info", + "keda_scaler_active": "scaler.active", + "keda_scaled_object_paused": "scaled_object.paused", + "keda_scaler_metrics_value": "scaler.metrics_value", + "keda_scaler_metrics_latency_seconds": "scaler.metrics_latency_seconds", + "keda_scaler_detail_errors": "scaler.detail_errors", + "keda_scaled_object_errors": "scaled_object.errors", + "keda_scaled_job_errors": "scaled_job.errors", + "keda_resource_registered": "resource_registered", + "keda_trigger_registered": "trigger_registered", + "keda_internal_scale_loop_latency_seconds": "internal_scale.loop_latency_seconds", + "keda_cloudeventsource_events_emitted": "cloudeventsource.events_emitted", + "keda_cloudeventsource_events_queued": "cloudeventsource.events_queued", + "keda_internal_metricsservice_grpc_server_started": "internal_metricsservice.grpc_server_started", + "keda_internal_metricsservice_grpc_server_handled": "internal_metricsservice.grpc_server_handled", + "keda_internal_metricsservice_grpc_server_msg_received": "internal_metricsservice.grpc_server_msg_received", + "keda_internal_metricsservice_grpc_server_msg_sent": "internal_metricsservice.grpc_server_msg_sent", + "keda_internal_metricsservice_grpc_server_handling_seconds": "internal_metricsservice.grpc_server_handling_seconds", + # Operator soon to be deprecated in v2.16 + "keda_scaler_metrics_latency": "scaler.metrics_latency", + "keda_scaler_errors": "scaler.errors", + "keda_resource_totals": "resource_totals", + "keda_trigger_totals": "trigger_totals", + "keda_internal_scale_loop_latency": "internal_scale.loop_latency", + # Admission Webhook + "keda_webhook_scaled_object_validation": "webhook_scaled.object_validation", + "keda_webhook_scaled_object_validation_errors": "webhook_scaled.object_validation_errors", + # Metrics Server + "keda_internal_metricsservice_grpc_client_started": "internal_metricsservice.grpc_client_started", + "keda_internal_metricsservice_grpc_client_handled": "internal_metricsservice.grpc_client_handled", + "keda_internal_metricsservice_grpc_client_msg_received": "internal_metricsservice.grpc_client_msg_received", + "keda_internal_metricsservice_grpc_client_msg_sent": "internal_metricsservice.grpc_client_msg_sent", + "keda_internal_metricsservice_grpc_client_handling_seconds": "internal_metricsservice.grpc_client_handling_seconds", + # Metric Server + "aggregator_discovery_aggregation_count": "aggregator_discovery_aggregation", + "apiserver_audit_event": "apiserver_audit_event", + "apiserver_audit_requests_rejected": "apiserver_audit_requests_rejected", + "apiserver_client_certificate_expiration_seconds": "apiserver_client_certificate_expiration_seconds", + "apiserver_current_inflight_requests": "apiserver_current_inflight_requests", + "apiserver_delegated_authz_request": "apiserver_delegated_authz_request", + "apiserver_delegated_authz_request_duration_seconds": "apiserver_delegated_authz_request_duration_seconds", + "apiserver_envelope_encryption_dek_cache_fill_percent": "apiserver_envelope_encryption_dek_cache_fill_percent", + "apiserver_flowcontrol_read_vs_write_current_requests": "apiserver_flowcontrol_read_vs_write_current_requests", + "apiserver_flowcontrol_seat_fair_frac": "apiserver_flowcontrol_seat_fair_frac", + "apiserver_request": "apiserver_request", + "apiserver_request_duration_seconds": "apiserver_request_duration_seconds", + "apiserver_request_filter_duration_seconds": 
"apiserver_request_filter_duration_seconds", + "apiserver_request_sli_duration_seconds": "apiserver_request_sli_duration_seconds", + "apiserver_request_slo_duration_seconds": "apiserver_request_slo_duration_seconds", + "apiserver_response_sizes": "apiserver_response_sizes", + "apiserver_storage_data_key_generation_duration_seconds": "apiserver_storage_data_key_generation_duration_seconds", + "apiserver_storage_data_key_generation_failures": "apiserver_storage_data_key_generation_failures", + "apiserver_storage_envelope_transformation_cache_misses": "apiserver_storage_envelope_transformation_cache_misses", + "apiserver_tls_handshake_errors": "apiserver_tls_handshake_errors", + "apiserver_webhooks_x509_insecure_sha1": "apiserver_webhooks_x509_insecure_sha1", + "apiserver_webhooks_x509_missing_san": "apiserver_webhooks_x509_missing_san", + "authenticated_user_requests": "authenticated_user_requests", + "authentication_attempts": "authentication_attempts", + "authentication_duration_seconds": "authentication_duration_seconds", + "authorization_attempts": "authorization_attempts", + "authorization_duration_seconds": "authorization_duration_seconds", + "cardinality_enforcement_unexpected_categorizations": "cardinality_enforcement_unexpected_categorizations", + "disabled_metrics": "disabled_metrics", + "field_validation_request_duration_seconds": "field_validation_request_duration_seconds", + "hidden_metrics": "hidden_metrics", + "registered_metrics": "registered_metrics", + # Generic Metrics + 'controller_clientset_k8s_request': 'controller.clientset.k8s.request', + 'go_gc_duration_seconds': 'go.gc.duration.seconds', + 'go_goroutines': 'go.goroutines', + 'go_info': "go.info", + 'go_memstats_alloc_bytes': {'name': 'go.memstats.alloc_bytes', 'type': 'native_dynamic'}, + 'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash.sys_bytes', + 'go_memstats_frees': 'go.memstats.frees', + 'go_memstats_gc_cpu_fraction': 'go.memstats.gc.cpu_fraction', + 'go_memstats_gc_sys_bytes': 'go.memstats.gc.sys_bytes', + 'go_memstats_heap_alloc_bytes': 'go.memstats.heap.alloc_bytes', + 'go_memstats_heap_idle_bytes': 'go.memstats.heap.idle_bytes', + 'go_memstats_heap_inuse_bytes': 'go.memstats.heap.inuse_bytes', + 'go_memstats_heap_objects': 'go.memstats.heap.objects', + 'go_memstats_heap_released_bytes': 'go.memstats.heap.released_bytes', + 'go_memstats_heap_sys_bytes': 'go.memstats.heap.sys_bytes', + 'go_memstats_lookups': 'go.memstats.lookups', + 'go_memstats_mallocs': 'go.memstats.mallocs', + 'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache.inuse_bytes', + 'go_memstats_mcache_sys_bytes': 'go.memstats.mcache.sys_bytes', + 'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan.inuse_bytes', + 'go_memstats_mspan_sys_bytes': 'go.memstats.mspan.sys_bytes', + 'go_memstats_next_gc_bytes': 'go.memstats.next.gc_bytes', + 'go_memstats_other_sys_bytes': 'go.memstats.other.sys_bytes', + 'go_memstats_stack_inuse_bytes': 'go.memstats.stack.inuse_bytes', + 'go_memstats_stack_sys_bytes': 'go.memstats.stack.sys_bytes', + 'go_memstats_sys_bytes': 'go.memstats.sys_bytes', + 'go_threads': 'go.threads', + 'process_cpu_seconds': 'process.cpu.seconds', + 'process_max_fds': 'process.max_fds', + 'process_open_fds': 'process.open_fds', + 'process_resident_memory_bytes': 'process.resident_memory.bytes', + 'process_start_time_seconds': { + 'name': 'process.uptime.seconds', + 'type': 'time_elapsed', + }, + 'process_virtual_memory_bytes': 'process.virtual_memory.bytes', + 'process_virtual_memory_max_bytes': 
'process.virtual_memory.max_bytes', + 'workqueue_adds': 'workqueue.adds', + 'workqueue_depth': 'workqueue.depth', + 'workqueue_longest_running_processor_seconds': 'workqueue.longest.running_processor.seconds', + 'workqueue_queue_duration_seconds': 'workqueue.queue.duration.seconds', + 'workqueue_retries': 'workqueue.retries', + 'workqueue_unfinished_work_seconds': 'workqueue.unfinished_work.seconds', + 'workqueue_work_duration_seconds': 'workqueue.work.duration.seconds', + 'certwatcher_read_certificate': 'certwatcher.read_certificate', + 'certwatcher_read_certificate_errors': 'certwatcher.read_certificate_errors', + 'controller_runtime_active_workers': 'controller.runtime.active_workers', + 'controller_runtime_max_concurrent_reconciles': 'controller.runtime.max_concurrent_reconciles', + 'controller_runtime_reconcile': 'controller.runtime.reconcile', + 'controller_runtime_reconcile_errors': 'controller.runtime.reconcile_errors', + 'controller_runtime_reconcile_panics': 'controller.runtime.reconcile_panics', + 'controller_runtime_reconcile_time_seconds': 'controller.runtime.reconcile_time.seconds', + 'controller_runtime_terminal_reconcile_errors': 'controller.runtime.terminal_reconcile_errors', + 'controller_runtime_webhook_panics': 'controller.runtime.webhook_panics', + 'controller_runtime_webhook_requests': 'controller.runtime.webhook_requests', + 'controller_runtime_webhook_requests_in_flight': 'controller.runtime.webhook_requests_in_flight', + 'go_memstats_last_gc_time_seconds': { + 'name': 'go.memstats.time_since_last_gc.seconds', + 'type': 'time_elapsed', + }, + 'leader_election_master_status': 'leader_election.master_status', + 'rest_client_requests': 'rest.client.requests', +} + +RENAME_LABELS_MAP = { + 'version': 'keda_version', + 'host': 'keda_host', +} diff --git a/keda/hatch.toml b/keda/hatch.toml new file mode 100644 index 0000000000000..c8e94079d3721 --- /dev/null +++ b/keda/hatch.toml @@ -0,0 +1,5 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] +version = ["2.16.0"] \ No newline at end of file diff --git a/keda/manifest.json b/keda/manifest.json new file mode 100644 index 0000000000000..df68f70639b97 --- /dev/null +++ b/keda/manifest.json @@ -0,0 +1,55 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "961d21ab-c2c9-409f-8d2a-e54132004d1e", + "app_id": "keda", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "", + "title": "Keda", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::Metrics", + "Category::Kubernetes", + "Category::Security", + "Submitted Data Type::Metrics", + "Offering::Integration" + ] + }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 33795545, + "source_type_name": "Keda", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "keda.", + "check": "keda.process.uptime.seconds", + "metadata_path": "metadata.csv" + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Keda Overview": "assets/dashboards/keda_overview.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/keda/metadata.csv 
b/keda/metadata.csv new file mode 100644 index 0000000000000..bebba661e3f72 --- /dev/null +++ b/keda/metadata.csv @@ -0,0 +1,132 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +keda.aggregator_discovery_aggregation.count,count,,,,,0,keda,,, +keda.apiserver_audit_event.count,count,,,,,0,keda,,, +keda.apiserver_audit_requests_rejected.count,count,,,,,0,keda,,, +keda.apiserver_client_certificate_expiration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_client_certificate_expiration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_client_certificate_expiration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_current_inflight_requests,gauge,,,,,0,keda,,, +keda.apiserver_delegated_authz_request.count,count,,,,,0,keda,,, +keda.apiserver_delegated_authz_request_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_delegated_authz_request_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_delegated_authz_request_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_envelope_encryption_dek_cache_fill_percent,gauge,,,,,0,keda,,, +keda.apiserver_flowcontrol_read_vs_write_current_requests.bucket,count,,,,,0,keda,,, +keda.apiserver_flowcontrol_read_vs_write_current_requests.count,count,,,,,0,keda,,, +keda.apiserver_flowcontrol_read_vs_write_current_requests.sum,count,,,,,0,keda,,, +keda.apiserver_flowcontrol_seat_fair_frac,gauge,,,,,0,keda,,, +keda.apiserver_request.count,count,,,,,0,keda,,, +keda.apiserver_request_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_request_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_request_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_request_filter_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_request_filter_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_request_filter_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_request_sli_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_request_sli_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_request_sli_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_request_slo_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_request_slo_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_request_slo_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_response_sizes.bucket,count,,,,,0,keda,,, +keda.apiserver_response_sizes.count,count,,,,,0,keda,,, +keda.apiserver_response_sizes.sum,count,,,,,0,keda,,, +keda.apiserver_storage_data_key_generation_duration_seconds.bucket,count,,,,,0,keda,,, +keda.apiserver_storage_data_key_generation_duration_seconds.count,count,,,,,0,keda,,, +keda.apiserver_storage_data_key_generation_duration_seconds.sum,count,,,,,0,keda,,, +keda.apiserver_storage_data_key_generation_failures.count,count,,,,,0,keda,,, +keda.apiserver_storage_envelope_transformation_cache_misses.count,count,,,,,0,keda,,, +keda.apiserver_tls_handshake_errors.count,count,,,,,0,keda,,, +keda.apiserver_webhooks_x509_insecure_sha1.count,count,,,,,0,keda,,, +keda.apiserver_webhooks_x509_missing_san.count,count,,,,,0,keda,,, +keda.authenticated_user_requests.count,count,,,,,0,keda,,, +keda.authentication_attempts.count,count,,,,,0,keda,,, +keda.authentication_duration_seconds.bucket,count,,,,,0,keda,,, +keda.authentication_duration_seconds.count,count,,,,,0,keda,,, +keda.authentication_duration_seconds.sum,count,,,,,0,keda,,, +keda.authorization_attempts.count,count,,,,,0,keda,,, 
+keda.authorization_duration_seconds.bucket,count,,,,,0,keda,,, +keda.authorization_duration_seconds.count,count,,,,,0,keda,,, +keda.authorization_duration_seconds.sum,count,,,,,0,keda,,, +keda.build_info,gauge,,,,,0,keda,,, +keda.cardinality_enforcement_unexpected_categorizations.count,count,,,,,0,keda,,, +keda.certwatcher.read_certificate.count,count,,,,,0,keda,,, +keda.certwatcher.read_certificate_errors.count,count,,,,,0,keda,,, +keda.controller.runtime.active_workers,gauge,,,,,0,keda,,, +keda.controller.runtime.max_concurrent_reconciles,gauge,,,,,0,keda,,, +keda.controller.runtime.reconcile.count,count,,,,,0,keda,,, +keda.controller.runtime.reconcile_errors.count,count,,,,,0,keda,,, +keda.controller.runtime.reconcile_panics.count,count,,,,,0,keda,,, +keda.controller.runtime.reconcile_time.seconds.bucket,count,,,,,0,keda,,, +keda.controller.runtime.reconcile_time.seconds.count,count,,,,,0,keda,,, +keda.controller.runtime.reconcile_time.seconds.sum,count,,,,,0,keda,,, +keda.controller.runtime.terminal_reconcile_errors.count,count,,,,,0,keda,,, +keda.controller.runtime.webhook_panics.count,count,,,,,0,keda,,, +keda.controller.runtime.webhook_requests.count,count,,,,,0,keda,,, +keda.controller.runtime.webhook_requests_in_flight,gauge,,,,,0,keda,,, +keda.disabled_metrics.count,count,,,,,0,keda,,, +keda.field_validation_request_duration_seconds.bucket,count,,,,,0,keda,,, +keda.field_validation_request_duration_seconds.count,count,,,,,0,keda,,, +keda.field_validation_request_duration_seconds.sum,count,,,,,0,keda,,, +keda.go.gc.duration.seconds.count,count,,,,,0,keda,,, +keda.go.gc.duration.seconds.quantile,gauge,,,,,0,keda,,, +keda.go.gc.duration.seconds.sum,count,,,,,0,keda,,, +keda.go.goroutines,gauge,,,,,0,keda,,, +keda.go.info,gauge,,,,,0,keda,,, +keda.go.memstats.alloc_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.alloc_bytes.count,count,,,,,0,keda,,, +keda.go.memstats.buck_hash.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.frees.count,count,,,,,0,keda,,, +keda.go.memstats.gc.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.heap.alloc_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.heap.idle_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.heap.inuse_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.heap.objects,gauge,,,,,0,keda,,, +keda.go.memstats.heap.released_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.heap.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.lookups.count,count,,,,,0,keda,,, +keda.go.memstats.mallocs.count,count,,,,,0,keda,,, +keda.go.memstats.mcache.inuse_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.mcache.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.mspan.inuse_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.mspan.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.next.gc_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.other.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.stack.inuse_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.stack.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.sys_bytes,gauge,,,,,0,keda,,, +keda.go.memstats.time_since_last_gc.seconds,gauge,,,,,0,keda,,, +keda.go.threads,gauge,,,,,0,keda,,, +keda.hidden_metrics.count,count,,,,,0,keda,,, +keda.internal_scale.loop_latency,gauge,,,,,0,keda,,, +keda.internal_scale.loop_latency_seconds,gauge,,,,,0,keda,,, +keda.leader_election.master_status,gauge,,,,,0,keda,,, +keda.process.cpu.seconds.count,count,,,,,0,keda,,, +keda.process.max_fds,gauge,,,,,0,keda,,, +keda.process.open_fds,gauge,,,,,0,keda,,, +keda.process.resident_memory.bytes,gauge,,,,,0,keda,,, +keda.process.uptime.seconds,gauge,,,,,0,keda,,, 
+keda.process.virtual_memory.bytes,gauge,,,,,0,keda,,, +keda.process.virtual_memory.max_bytes,gauge,,,,,0,keda,,, +keda.registered_metrics.count,count,,,,,0,keda,,, +keda.resource_totals,gauge,,,,,0,keda,,, +keda.rest.client.requests.count,count,,,,,0,keda,,, +keda.scaled_job.errors.count,count,,,,,0,keda,,, +keda.scaler.active,gauge,,,,,0,keda,,, +keda.scaler.detail_errors.count,count,,,,,0,keda,,, +keda.scaler.errors.count,count,,,,,0,keda,,, +keda.scaler.metrics_latency,gauge,,,,,0,keda,,, +keda.scaler.metrics_latency_seconds,gauge,,,,,0,keda,,, +keda.scaler.metrics_value,gauge,,,,,0,keda,,, +keda.trigger_totals,gauge,,,,,0,keda,,, +keda.workqueue.adds.count,count,,,,,0,keda,,, +keda.workqueue.depth,gauge,,,,,0,keda,,, +keda.workqueue.longest.running_processor.seconds,gauge,,,,,0,keda,,, +keda.workqueue.queue.duration.seconds.bucket,count,,,,,0,keda,,, +keda.workqueue.queue.duration.seconds.count,count,,,,,0,keda,,, +keda.workqueue.queue.duration.seconds.sum,count,,,,,0,keda,,, +keda.workqueue.retries.count,count,,,,,0,keda,,, +keda.workqueue.unfinished_work.seconds,gauge,,,,,0,keda,,, +keda.workqueue.work.duration.seconds.bucket,count,,,,,0,keda,,, +keda.workqueue.work.duration.seconds.count,count,,,,,0,keda,,, +keda.workqueue.work.duration.seconds.sum,count,,,,,0,keda,,, diff --git a/keda/pyproject.toml b/keda/pyproject.toml new file mode 100644 index 0000000000000..52d46b5adcf56 --- /dev/null +++ b/keda/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-keda" +description = "The Keda check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.12" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "keda", +] +authors = [ + { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=37.0.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/keda/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/keda", +] +dev-mode-dirs = [ + ".", +] diff --git a/keda/tests/__init__.py b/keda/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/keda/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/keda/tests/common.py b/keda/tests/common.py new file mode 100644 index 0000000000000..2dd1d9b669da2 --- /dev/null +++ b/keda/tests/common.py @@ -0,0 +1,157 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import os + +from datadog_checks.dev import get_docker_hostname, get_here + +HERE = get_here() +HOST = get_docker_hostname() +PORT = 8080 + + +def get_fixture_path(filename): + return os.path.join(HERE, 'fixtures', filename) + + +MOCKED_INSTANCE = { + "openmetrics_endpoint": f"http://{HOST}:{PORT}/metrics", + 'tags': ['test:tag'], +} + +COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml') + +TEST_METRICS = [ + 'keda.aggregator_discovery_aggregation.count', + 'keda.apiserver_audit_event.count', + 'keda.apiserver_audit_requests_rejected.count', + 'keda.apiserver_client_certificate_expiration_seconds.bucket', + 'keda.apiserver_client_certificate_expiration_seconds.count', + 'keda.apiserver_client_certificate_expiration_seconds.sum', + 'keda.apiserver_current_inflight_requests', + 'keda.apiserver_delegated_authz_request.count', + 'keda.apiserver_delegated_authz_request_duration_seconds.bucket', + 'keda.apiserver_delegated_authz_request_duration_seconds.count', + 'keda.apiserver_delegated_authz_request_duration_seconds.sum', + 'keda.apiserver_envelope_encryption_dek_cache_fill_percent', + 'keda.apiserver_flowcontrol_read_vs_write_current_requests.bucket', + 'keda.apiserver_flowcontrol_read_vs_write_current_requests.count', + 'keda.apiserver_flowcontrol_read_vs_write_current_requests.sum', + 'keda.apiserver_flowcontrol_seat_fair_frac', + 'keda.apiserver_request.count', + 'keda.apiserver_request_duration_seconds.bucket', + 'keda.apiserver_request_duration_seconds.count', + 'keda.apiserver_request_duration_seconds.sum', + 'keda.apiserver_request_filter_duration_seconds.bucket', + 'keda.apiserver_request_filter_duration_seconds.count', + 'keda.apiserver_request_filter_duration_seconds.sum', + 'keda.apiserver_request_sli_duration_seconds.bucket', + 'keda.apiserver_request_sli_duration_seconds.count', + 'keda.apiserver_request_sli_duration_seconds.sum', + 'keda.apiserver_request_slo_duration_seconds.bucket', + 'keda.apiserver_request_slo_duration_seconds.count', + 'keda.apiserver_request_slo_duration_seconds.sum', + 'keda.apiserver_response_sizes.bucket', + 'keda.apiserver_response_sizes.count', + 'keda.apiserver_response_sizes.sum', + 'keda.apiserver_storage_data_key_generation_duration_seconds.bucket', + 'keda.apiserver_storage_data_key_generation_duration_seconds.count', + 'keda.apiserver_storage_data_key_generation_duration_seconds.sum', + 'keda.apiserver_storage_data_key_generation_failures.count', + 'keda.apiserver_storage_envelope_transformation_cache_misses.count', + 'keda.apiserver_tls_handshake_errors.count', + 'keda.apiserver_webhooks_x509_insecure_sha1.count', + 'keda.apiserver_webhooks_x509_missing_san.count', + 'keda.authenticated_user_requests.count', + 'keda.authentication_attempts.count', + 'keda.authentication_duration_seconds.bucket', + 'keda.authentication_duration_seconds.count', + 'keda.authentication_duration_seconds.sum', + 'keda.authorization_attempts.count', + 'keda.authorization_duration_seconds.bucket', + 'keda.authorization_duration_seconds.count', + 'keda.authorization_duration_seconds.sum', + 'keda.build_info', + 'keda.cardinality_enforcement_unexpected_categorizations.count', + 'keda.certwatcher.read_certificate.count', + 'keda.certwatcher.read_certificate_errors.count', + 'keda.controller.runtime.active_workers', + 'keda.controller.runtime.max_concurrent_reconciles', + 'keda.controller.runtime.reconcile.count', + 
'keda.controller.runtime.reconcile_errors.count', + 'keda.controller.runtime.reconcile_panics.count', + 'keda.controller.runtime.reconcile_time.seconds.bucket', + 'keda.controller.runtime.reconcile_time.seconds.count', + 'keda.controller.runtime.reconcile_time.seconds.sum', + 'keda.controller.runtime.terminal_reconcile_errors.count', + 'keda.controller.runtime.webhook_panics.count', + 'keda.controller.runtime.webhook_requests.count', + 'keda.controller.runtime.webhook_requests_in_flight', + 'keda.disabled_metrics.count', + 'keda.field_validation_request_duration_seconds.bucket', + 'keda.field_validation_request_duration_seconds.count', + 'keda.field_validation_request_duration_seconds.sum', + 'keda.go.gc.duration.seconds.count', + 'keda.go.gc.duration.seconds.quantile', + 'keda.go.gc.duration.seconds.sum', + 'keda.go.goroutines', + 'keda.go.info', + 'keda.go.memstats.alloc_bytes', + 'keda.go.memstats.alloc_bytes.count', + 'keda.go.memstats.buck_hash.sys_bytes', + 'keda.go.memstats.frees.count', + 'keda.go.memstats.gc.sys_bytes', + 'keda.go.memstats.heap.alloc_bytes', + 'keda.go.memstats.heap.idle_bytes', + 'keda.go.memstats.heap.inuse_bytes', + 'keda.go.memstats.heap.objects', + 'keda.go.memstats.heap.released_bytes', + 'keda.go.memstats.heap.sys_bytes', + 'keda.go.memstats.time_since_last_gc.seconds', + 'keda.go.memstats.lookups.count', + 'keda.go.memstats.mallocs.count', + 'keda.go.memstats.mcache.inuse_bytes', + 'keda.go.memstats.mcache.sys_bytes', + 'keda.go.memstats.mspan.inuse_bytes', + 'keda.go.memstats.mspan.sys_bytes', + 'keda.go.memstats.next.gc_bytes', + 'keda.go.memstats.other.sys_bytes', + 'keda.go.memstats.stack.inuse_bytes', + 'keda.go.memstats.stack.sys_bytes', + 'keda.go.memstats.sys_bytes', + 'keda.go.threads', + 'keda.hidden_metrics.count', + 'keda.internal_scale.loop_latency', + 'keda.internal_scale.loop_latency_seconds', + 'keda.leader_election.master_status', + 'keda.process.cpu.seconds.count', + 'keda.process.max_fds', + 'keda.process.open_fds', + 'keda.process.resident_memory.bytes', + 'keda.process.uptime.seconds', + 'keda.process.virtual_memory.bytes', + 'keda.process.virtual_memory.max_bytes', + 'keda.registered_metrics.count', + 'keda.resource_totals', + 'keda.rest.client.requests.count', + 'keda.scaled_job.errors.count', + 'keda.scaler.active', + 'keda.scaler.detail_errors.count', + 'keda.scaler.errors.count', + 'keda.scaler.metrics_latency', + 'keda.scaler.metrics_latency_seconds', + 'keda.scaler.metrics_value', + 'keda.trigger_totals', + 'keda.workqueue.adds.count', + 'keda.workqueue.depth', + 'keda.workqueue.longest.running_processor.seconds', + 'keda.workqueue.queue.duration.seconds.bucket', + 'keda.workqueue.queue.duration.seconds.count', + 'keda.workqueue.queue.duration.seconds.sum', + 'keda.workqueue.retries.count', + 'keda.workqueue.unfinished_work.seconds', + 'keda.workqueue.work.duration.seconds.bucket', + 'keda.workqueue.work.duration.seconds.count', + 'keda.workqueue.work.duration.seconds.sum', +] diff --git a/keda/tests/conftest.py b/keda/tests/conftest.py new file mode 100644 index 0000000000000..44f8cb07eb0d4 --- /dev/null +++ b/keda/tests/conftest.py @@ -0,0 +1,41 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import copy +import os +from contextlib import ExitStack + +import pytest + +from datadog_checks.dev.kind import kind_run +from datadog_checks.dev.kube_port_forward import port_forward +from datadog_checks.dev.subprocess import run_command + +from . 
import common
+
+HERE = common.HERE
+
+
+def setup_keda():
+    run_command(['kubectl', 'create', 'ns', 'keda'])
+    run_command(['kubectl', 'apply', '-f', os.path.join(HERE, 'kind', 'keda_install.yaml')])
+
+    # Try to ensure that the Kubernetes resources are deployed and ready before we do anything else
+    run_command(['kubectl', 'rollout', 'status', 'deployment/keda-operator-metrics-apiserver', '-n', 'keda'])
+    run_command(['kubectl', 'wait', 'pods', '--all', '-n', 'keda', '--for=condition=Ready', '--timeout=600s'])
+
+
+@pytest.fixture(scope='session')
+def dd_environment():
+    with kind_run(conditions=[setup_keda], sleep=30) as kubeconfig, ExitStack() as stack:
+        keda_host, keda_port = stack.enter_context(
+            port_forward(kubeconfig, 'keda', 8080, 'deployment', 'keda-operator-metrics-apiserver')
+        )
+        instances = [{'openmetrics_endpoint': f'http://{keda_host}:{keda_port}/metrics'}]
+
+        yield {'instances': instances}
+
+
+@pytest.fixture
+def instance():
+    return copy.deepcopy(common.MOCKED_INSTANCE)
diff --git a/keda/tests/fixtures/keda_metrics.txt b/keda/tests/fixtures/keda_metrics.txt
new file mode 100644
index 0000000000000..878f5966fc2d0
--- /dev/null
+++ b/keda/tests/fixtures/keda_metrics.txt
@@ -0,0 +1,1162 @@
+# HELP certwatcher_read_certificate_errors_total Total number of certificate read errors
+# TYPE certwatcher_read_certificate_errors_total counter
+certwatcher_read_certificate_errors_total 0
+# HELP controller_runtime_webhook_panics_total Total number of webhook panics
+# TYPE controller_runtime_webhook_panics_total counter
+controller_runtime_webhook_panics_total 0
+# HELP controller_runtime_webhook_requests_in_flight Current number of admission requests being served.
+# TYPE controller_runtime_webhook_requests_in_flight gauge
+controller_runtime_webhook_requests_in_flight{webhook="/validate-eventing-keda-sh-v1alpha1-cloudeventsource"} 0
+controller_runtime_webhook_requests_in_flight{webhook="/validate-eventing-keda-sh-v1alpha1-clustercloudeventsource"} 0
+controller_runtime_webhook_requests_in_flight{webhook="/validate-keda-sh-v1alpha1-clustertriggerauthentication"} 0
+controller_runtime_webhook_requests_in_flight{webhook="/validate-keda-sh-v1alpha1-scaledjob"} 0
+controller_runtime_webhook_requests_in_flight{webhook="/validate-keda-sh-v1alpha1-scaledobject"} 0
+controller_runtime_webhook_requests_in_flight{webhook="/validate-keda-sh-v1alpha1-triggerauthentication"} 0
+# HELP controller_runtime_webhook_requests_total Total number of admission requests by HTTP status code.
+# TYPE controller_runtime_webhook_requests_total counter +controller_runtime_webhook_requests_total{code="200",webhook="/validate-eventing-keda-sh-v1alpha1-cloudeventsource"} 0 +controller_runtime_webhook_requests_total{code="200",webhook="/validate-eventing-keda-sh-v1alpha1-clustercloudeventsource"} 0 +controller_runtime_webhook_requests_total{code="200",webhook="/validate-keda-sh-v1alpha1-clustertriggerauthentication"} 0 +controller_runtime_webhook_requests_total{code="200",webhook="/validate-keda-sh-v1alpha1-scaledjob"} 0 +controller_runtime_webhook_requests_total{code="200",webhook="/validate-keda-sh-v1alpha1-scaledobject"} 0 +controller_runtime_webhook_requests_total{code="200",webhook="/validate-keda-sh-v1alpha1-triggerauthentication"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-eventing-keda-sh-v1alpha1-cloudeventsource"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-eventing-keda-sh-v1alpha1-clustercloudeventsource"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-keda-sh-v1alpha1-clustertriggerauthentication"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-keda-sh-v1alpha1-scaledjob"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-keda-sh-v1alpha1-scaledobject"} 0 +controller_runtime_webhook_requests_total{code="500",webhook="/validate-keda-sh-v1alpha1-triggerauthentication"} 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 8.042e-06 +go_gc_duration_seconds{quantile="0.25"} 8.042e-06 +go_gc_duration_seconds{quantile="0.5"} 0.000170208 +go_gc_duration_seconds{quantile="0.75"} 0.000170208 +go_gc_duration_seconds{quantile="1"} 0.000170208 +go_gc_duration_seconds_sum 0.00017825 +go_gc_duration_seconds_count 2 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 3.223464e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 22526 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 2.736128e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 12431 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.7326008802889886e+09 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 34957 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 75040 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 81600 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 557056 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_resident_memory_bytes Resident memory size in bytes. 
+# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 3.7474304e+07 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated +# TYPE aggregator_discovery_aggregation_count_total counter +aggregator_discovery_aggregation_count_total 1 +# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend. +# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request. +# TYPE apiserver_client_certificate_expiration_seconds histogram +apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 0 +apiserver_client_certificate_expiration_seconds_sum 0 +apiserver_client_certificate_expiration_seconds_count 0 +# HELP apiserver_current_inflight_requests [STABLE] Maximal number of currently used inflight request limit of this apiserver per request kind in last second. +# TYPE apiserver_current_inflight_requests gauge +apiserver_current_inflight_requests{request_kind="mutating"} 0 +apiserver_current_inflight_requests{request_kind="readOnly"} 0 +# HELP apiserver_delegated_authz_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by status code. 
+# TYPE apiserver_delegated_authz_request_duration_seconds histogram
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.25"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.5"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.7"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1.5"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="3"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="5"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="10"} 12
+apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="+Inf"} 12
+apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.045623252
+apiserver_delegated_authz_request_duration_seconds_count{code="201"} 12
+# HELP apiserver_delegated_authz_request_total [ALPHA] Number of HTTP requests partitioned by status code.
+# TYPE apiserver_delegated_authz_request_total counter
+apiserver_delegated_authz_request_total{code="201"} 12
+# HELP apiserver_envelope_encryption_dek_cache_fill_percent [ALPHA] Percent of the cache slots currently occupied by cached DEKs.
+# TYPE apiserver_envelope_encryption_dek_cache_fill_percent gauge
+apiserver_envelope_encryption_dek_cache_fill_percent 0
+# HELP apiserver_flowcontrol_read_vs_write_current_requests EXPERIMENTAL: [ALPHA] Observations, at the end of every nanosecond, of the number of requests (as a fraction of the relevant limit) waiting or in regular stage of execution
+# TYPE apiserver_flowcontrol_read_vs_write_current_requests histogram
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0"} 3.43803666325e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.001"} 3.43803666325e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.01"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.1"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.2"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.3"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.4"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.5"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.6"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.7"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.8"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.9"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.95"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="0.99"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="1"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_bucket{phase="executing",request_kind="readOnly",le="+Inf"} 3.43861560783e+11
+apiserver_flowcontrol_read_vs_write_current_requests_sum{phase="executing",request_kind="readOnly"} 195506.455
+apiserver_flowcontrol_read_vs_write_current_requests_count{phase="executing",request_kind="readOnly"} 3.43861560783e+11
+# HELP apiserver_flowcontrol_seat_fair_frac [ALPHA] Fair fraction of server's concurrency to allocate to each priority level that can use it
+# TYPE apiserver_flowcontrol_seat_fair_frac gauge
+apiserver_flowcontrol_seat_fair_frac 0
+# HELP apiserver_request_duration_seconds [STABLE] Response latency distribution in seconds for each verb, dry run value, group, version, resource, subresource, scope and component.
+# TYPE apiserver_request_duration_seconds histogram
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.005"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.025"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.05"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.1"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.2"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.4"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.6"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.8"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.25"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.5"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="2"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="3"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="4"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="5"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="6"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="8"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="10"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="15"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="20"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="30"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="45"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="60"} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="+Inf"} 34
+apiserver_request_duration_seconds_sum{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 0.010738917999999998
+apiserver_request_duration_seconds_count{component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 34
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.005"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.025"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.05"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.1"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.2"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.4"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.6"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.8"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.25"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.5"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="2"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="3"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="4"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="5"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="6"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="8"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="10"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="15"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="20"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="30"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="45"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="60"} 115
+apiserver_request_duration_seconds_bucket{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="+Inf"} 115
+apiserver_request_duration_seconds_sum{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 0.034279792
+apiserver_request_duration_seconds_count{component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 115
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.005"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.025"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.05"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.1"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.2"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.4"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.6"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.8"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.25"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.5"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="2"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="3"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="4"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="5"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="6"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="8"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="10"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="15"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="20"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="30"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="45"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="60"} 7
+apiserver_request_duration_seconds_bucket{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="+Inf"} 7
+apiserver_request_duration_seconds_sum{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 0.004998374999999999
+apiserver_request_duration_seconds_count{component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 7
+# HELP apiserver_request_filter_duration_seconds [ALPHA] Request filter latency distribution in seconds, for each filter type
+# TYPE apiserver_request_filter_duration_seconds histogram
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.0001"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.0003"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.001"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.003"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.01"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.03"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="0.3"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="5"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="10"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="15"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="30"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="audit",le="+Inf"} 239
+apiserver_request_filter_duration_seconds_sum{filter="audit"} 0.0001087550000000001
+apiserver_request_filter_duration_seconds_count{filter="audit"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.0001"} 183
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.0003"} 232
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.001"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.003"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.01"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.03"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="0.3"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="5"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="10"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="15"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="30"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authentication",le="+Inf"} 239
+apiserver_request_filter_duration_seconds_sum{filter="authentication"} 0.017057584000000008
+apiserver_request_filter_duration_seconds_count{filter="authentication"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.0001"} 227
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.0003"} 227
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.001"} 227
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.003"} 232
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.01"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.03"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="0.3"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="5"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="10"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="15"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="30"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="authorization",le="+Inf"} 239
+apiserver_request_filter_duration_seconds_sum{filter="authorization"} 0.04983261600000001
+apiserver_request_filter_duration_seconds_count{filter="authorization"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.0001"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.0003"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.001"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.003"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.01"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.03"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="0.3"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="1"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="5"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="10"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="15"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="30"} 239
+apiserver_request_filter_duration_seconds_bucket{filter="impersonation",le="+Inf"} 239
+apiserver_request_filter_duration_seconds_sum{filter="impersonation"} 0.002865845
+apiserver_request_filter_duration_seconds_count{filter="impersonation"} 239
+# HELP apiserver_request_sli_duration_seconds [ALPHA] Response latency distribution (not counting webhook duration and priority & fairness queue wait times) in seconds for each verb, group, version, resource, subresource, scope and component.
+# TYPE apiserver_request_sli_duration_seconds histogram
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.05"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.1"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.2"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.4"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.6"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.8"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.25"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.5"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="2"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="3"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="4"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="5"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="6"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="8"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="10"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="15"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="20"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="30"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="45"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="60"} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="+Inf"} 34
+apiserver_request_sli_duration_seconds_sum{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 0.010738917999999998
+apiserver_request_sli_duration_seconds_count{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 34
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.05"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.1"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.2"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.4"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.6"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.8"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.25"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.5"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="2"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="3"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="4"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="5"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="6"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="8"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="10"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="15"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="20"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="30"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="45"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="60"} 115
+apiserver_request_sli_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="+Inf"} 115
+apiserver_request_sli_duration_seconds_sum{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 0.034279792
+apiserver_request_sli_duration_seconds_count{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 115
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.05"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.1"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.2"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.4"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.6"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.8"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.25"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.5"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="2"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="3"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="4"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="5"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="6"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="8"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="10"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="15"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="20"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="30"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="45"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="60"} 7
+apiserver_request_sli_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="+Inf"} 7
+apiserver_request_sli_duration_seconds_sum{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 0.004998374999999999
+apiserver_request_sli_duration_seconds_count{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 7
+# HELP apiserver_request_slo_duration_seconds [ALPHA] Response latency distribution (not counting webhook duration and priority & fairness queue wait times) in seconds for each verb, group, version, resource, subresource, scope and component.
+# TYPE apiserver_request_slo_duration_seconds histogram
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.05"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.1"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.2"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.4"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.6"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="0.8"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.25"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1.5"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="2"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="3"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="4"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="5"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="6"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="8"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="10"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="15"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="20"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="30"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="45"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="60"} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="+Inf"} 34
+apiserver_request_slo_duration_seconds_sum{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 0.010738917999999998
+apiserver_request_slo_duration_seconds_count{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 34
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.05"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.1"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.2"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.4"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.6"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="0.8"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.25"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1.5"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="2"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="3"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="4"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="5"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="6"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="8"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="10"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="15"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="20"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="30"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="45"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="60"} 115
+apiserver_request_slo_duration_seconds_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="+Inf"} 115
+apiserver_request_slo_duration_seconds_sum{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 0.034279792
+apiserver_request_slo_duration_seconds_count{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 115
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.05"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.1"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.2"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.4"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.6"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="0.8"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.25"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1.5"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="2"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="3"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="4"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="5"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="6"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="8"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="10"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="15"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="20"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="30"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="45"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="60"} 7
+apiserver_request_slo_duration_seconds_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="+Inf"} 7
+apiserver_request_slo_duration_seconds_sum{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 0.004998374999999999
+apiserver_request_slo_duration_seconds_count{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 7
+# HELP apiserver_request_total [STABLE] Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.
+# TYPE apiserver_request_total counter
+apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 34
+apiserver_request_total{code="200",component="",dry_run="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 115
+apiserver_request_total{code="304",component="apiserver",dry_run="",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 7
+# HELP apiserver_response_sizes [STABLE] Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.
+# TYPE apiserver_response_sizes histogram
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1000"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="10000"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="100000"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1e+06"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1e+07"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1e+08"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="1e+09"} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version="",le="+Inf"} 34
+apiserver_response_sizes_sum{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 68
+apiserver_response_sizes_count{component="",group="",resource="",scope="",subresource="/healthz",verb="GET",version=""} 34
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1000"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="10000"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="100000"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1e+06"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1e+07"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1e+08"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="1e+09"} 115
+apiserver_response_sizes_bucket{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version="",le="+Inf"} 115
+apiserver_response_sizes_sum{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 230
+apiserver_response_sizes_count{component="",group="",resource="",scope="",subresource="/readyz",verb="GET",version=""} 115
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1000"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="10000"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="100000"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1e+06"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1e+07"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1e+08"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="1e+09"} 7
+apiserver_response_sizes_bucket{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version="",le="+Inf"} 7
+apiserver_response_sizes_sum{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 0
+apiserver_response_sizes_count{component="apiserver",group="",resource="",scope="",subresource="apis",verb="GET",version=""} 7
+# HELP apiserver_storage_data_key_generation_duration_seconds [ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.
+# TYPE apiserver_storage_data_key_generation_duration_seconds histogram
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="5e-06"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="1e-05"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="2e-05"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="4e-05"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="8e-05"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00016"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00032"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00064"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00128"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00256"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00512"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.01024"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.02048"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.04096"} 0
+apiserver_storage_data_key_generation_duration_seconds_bucket{le="+Inf"} 0
+apiserver_storage_data_key_generation_duration_seconds_sum 0
+apiserver_storage_data_key_generation_duration_seconds_count 0
+# HELP apiserver_storage_data_key_generation_failures_total [ALPHA] Total number of failed data encryption key(DEK) generation operations.
+# TYPE apiserver_storage_data_key_generation_failures_total counter
+apiserver_storage_data_key_generation_failures_total 0
+# HELP apiserver_storage_envelope_transformation_cache_misses_total [ALPHA] Total number of cache misses while accessing key decryption key(KEK).
+# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter
+apiserver_storage_envelope_transformation_cache_misses_total 0
+# HELP apiserver_tls_handshake_errors_total [ALPHA] Number of requests dropped with 'TLS handshake error from' error
+# TYPE apiserver_tls_handshake_errors_total counter
+apiserver_tls_handshake_errors_total 2
+# HELP apiserver_webhooks_x509_insecure_sha1_total [ALPHA] Counts the number of requests to servers with insecure SHA1 signatures in their serving certificate OR the number of connection failures due to the insecure SHA1 signatures (either/or, based on the runtime environment)
+# TYPE apiserver_webhooks_x509_insecure_sha1_total counter
+apiserver_webhooks_x509_insecure_sha1_total 0
+# HELP apiserver_webhooks_x509_missing_san_total [ALPHA] Counts the number of requests to servers missing SAN extension in their serving certificate OR the number of connection failures due to the lack of x509 certificate SAN extension missing (either/or, based on the runtime environment)
+# TYPE apiserver_webhooks_x509_missing_san_total counter
+apiserver_webhooks_x509_missing_san_total 0
+# HELP authenticated_user_requests [ALPHA] Counter of authenticated requests broken out by username.
+# TYPE authenticated_user_requests counter
+authenticated_user_requests{username="other"} 239
+# HELP authentication_attempts [ALPHA] Counter of authenticated attempts.
+# TYPE authentication_attempts counter
+authentication_attempts{result="success"} 239
+# HELP authentication_duration_seconds [ALPHA] Authentication duration in seconds broken out by result.
+# TYPE authentication_duration_seconds histogram
+authentication_duration_seconds_bucket{result="success",le="0.001"} 239
+authentication_duration_seconds_bucket{result="success",le="0.002"} 239
+authentication_duration_seconds_bucket{result="success",le="0.004"} 239
+authentication_duration_seconds_bucket{result="success",le="0.008"} 239
+authentication_duration_seconds_bucket{result="success",le="0.016"} 239
+authentication_duration_seconds_bucket{result="success",le="0.032"} 239
+authentication_duration_seconds_bucket{result="success",le="0.064"} 239
+authentication_duration_seconds_bucket{result="success",le="0.128"} 239
+authentication_duration_seconds_bucket{result="success",le="0.256"} 239
+authentication_duration_seconds_bucket{result="success",le="0.512"} 239
+authentication_duration_seconds_bucket{result="success",le="1.024"} 239
+authentication_duration_seconds_bucket{result="success",le="2.048"} 239
+authentication_duration_seconds_bucket{result="success",le="4.096"} 239
+authentication_duration_seconds_bucket{result="success",le="8.192"} 239
+authentication_duration_seconds_bucket{result="success",le="16.384"} 239
+authentication_duration_seconds_bucket{result="success",le="+Inf"} 239
+authentication_duration_seconds_sum{result="success"} 0.013010210000000003
+authentication_duration_seconds_count{result="success"} 239
+# HELP authorization_attempts_total [ALPHA] Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'.
+# TYPE authorization_attempts_total counter
+authorization_attempts_total{result="allowed"} 239
+# HELP authorization_duration_seconds [ALPHA] Authorization duration in seconds broken out by result.
+# TYPE authorization_duration_seconds histogram
+authorization_duration_seconds_bucket{result="allowed",le="0.001"} 227
+authorization_duration_seconds_bucket{result="allowed",le="0.002"} 228
+authorization_duration_seconds_bucket{result="allowed",le="0.004"} 235
+authorization_duration_seconds_bucket{result="allowed",le="0.008"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.016"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.032"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.064"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.128"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.256"} 239
+authorization_duration_seconds_bucket{result="allowed",le="0.512"} 239
+authorization_duration_seconds_bucket{result="allowed",le="1.024"} 239
+authorization_duration_seconds_bucket{result="allowed",le="2.048"} 239
+authorization_duration_seconds_bucket{result="allowed",le="4.096"} 239
+authorization_duration_seconds_bucket{result="allowed",le="8.192"} 239
+authorization_duration_seconds_bucket{result="allowed",le="16.384"} 239
+authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 239
+authorization_duration_seconds_sum{result="allowed"} 0.048879254000000004
+authorization_duration_seconds_count{result="allowed"} 239
+# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement.
+# TYPE cardinality_enforcement_unexpected_categorizations_total counter
+cardinality_enforcement_unexpected_categorizations_total 0
+# HELP disabled_metrics_total [BETA] The count of disabled metrics.
+# TYPE disabled_metrics_total counter
+disabled_metrics_total 0
+# HELP field_validation_request_duration_seconds [ALPHA] Response latency distribution in seconds for each field validation value
+# TYPE field_validation_request_duration_seconds histogram
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.05"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.1"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.2"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.4"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.6"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="0.8"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="1"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="1.25"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="1.5"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="2"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="3"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="4"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="5"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="6"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="8"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="10"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="15"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="20"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="30"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="45"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="60"} 0
+field_validation_request_duration_seconds_bucket{field_validation="",le="+Inf"} 0
+field_validation_request_duration_seconds_sum{field_validation=""} 0
+field_validation_request_duration_seconds_count{field_validation=""} 0
+# HELP hidden_metrics_total [BETA] The count of hidden metrics.
+# TYPE hidden_metrics_total counter
+hidden_metrics_total 0
+# HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version.
+# TYPE registered_metrics_total counter
+registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 120
+registered_metrics_total{deprecated_version="",stability_level="BETA"} 13
+registered_metrics_total{deprecated_version="",stability_level="STABLE"} 12
+registered_metrics_total{deprecated_version="1.27.0",stability_level="ALPHA"} 1
+registered_metrics_total{deprecated_version="1.28.0",stability_level="ALPHA"} 1
+registered_metrics_total{deprecated_version="1.30.0",stability_level="ALPHA"} 3
+registered_metrics_total{deprecated_version="1.31.0",stability_level="ALPHA"} 1
+# HELP workqueue_adds_total [ALPHA] Total number of adds handled by workqueue
+# TYPE workqueue_adds_total counter
+workqueue_adds_total{name="DynamicCABundle-client-ca-bundle"} 1
+workqueue_adds_total{name="DynamicCABundle-serving-cert"} 1
+workqueue_adds_total{name="DynamicConfigMapCABundle-client-ca"} 7
+workqueue_adds_total{name="DynamicServingCertificateController"} 7
+workqueue_adds_total{name="RequestHeaderAuthRequestController"} 1
+# HELP workqueue_depth [ALPHA] Current depth of workqueue
+# TYPE workqueue_depth gauge
+workqueue_depth{name="DynamicCABundle-client-ca-bundle"} 0
+workqueue_depth{name="DynamicCABundle-serving-cert"} 0
+workqueue_depth{name="DynamicConfigMapCABundle-client-ca"} 0
+workqueue_depth{name="DynamicServingCertificateController"} 0
+workqueue_depth{name="RequestHeaderAuthRequestController"} 0
+# HELP certwatcher_read_certificate_total Total number of certificate reads
+# TYPE certwatcher_read_certificate_total counter
+certwatcher_read_certificate_total 0
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 88
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 4.5449336e+07
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 3.909936e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.9595264e+07
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 1.900544e+06
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 1200
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 326400
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 2.7305824e+07
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.212416e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 3.1806728e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 8
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.73260052073e+09
+# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.
+# TYPE rest_client_requests_total counter
+rest_client_requests_total{code="200",host="10.96.0.1:443",method="GET"} 7
+rest_client_requests_total{code="201",host="10.96.0.1:443",method="POST"} 12
+# HELP controller_runtime_active_workers Number of currently used workers per controller
+# TYPE controller_runtime_active_workers gauge
+controller_runtime_active_workers{controller="cert-rotator"} 0
+controller_runtime_active_workers{controller="cloudeventsource"} 0
+controller_runtime_active_workers{controller="clustercloudeventsource"} 0
+controller_runtime_active_workers{controller="clustertriggerauthentication"} 0
+controller_runtime_active_workers{controller="scaledjob"} 0
+controller_runtime_active_workers{controller="scaledobject"} 0
+controller_runtime_active_workers{controller="triggerauthentication"} 0
+# HELP controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller
+# TYPE controller_runtime_max_concurrent_reconciles gauge
+controller_runtime_max_concurrent_reconciles{controller="cert-rotator"} 1
+controller_runtime_max_concurrent_reconciles{controller="cloudeventsource"} 1
+controller_runtime_max_concurrent_reconciles{controller="clustercloudeventsource"} 1
+controller_runtime_max_concurrent_reconciles{controller="clustertriggerauthentication"} 1
+controller_runtime_max_concurrent_reconciles{controller="scaledjob"} 1
+controller_runtime_max_concurrent_reconciles{controller="scaledobject"} 5
+controller_runtime_max_concurrent_reconciles{controller="triggerauthentication"} 1
+# HELP controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller
+# TYPE controller_runtime_reconcile_errors_total counter
+controller_runtime_reconcile_errors_total{controller="cert-rotator"} 0
+controller_runtime_reconcile_errors_total{controller="cloudeventsource"} 0
+controller_runtime_reconcile_errors_total{controller="clustercloudeventsource"} 0
+controller_runtime_reconcile_errors_total{controller="clustertriggerauthentication"} 0
+controller_runtime_reconcile_errors_total{controller="scaledjob"} 0
+controller_runtime_reconcile_errors_total{controller="scaledobject"} 0
+controller_runtime_reconcile_errors_total{controller="triggerauthentication"} 0
+# HELP controller_runtime_reconcile_panics_total Total number of reconciliation panics per controller
+# TYPE controller_runtime_reconcile_panics_total counter
+controller_runtime_reconcile_panics_total{controller="cert-rotator"} 0
+controller_runtime_reconcile_panics_total{controller="cloudeventsource"} 0
+controller_runtime_reconcile_panics_total{controller="clustercloudeventsource"} 0
+controller_runtime_reconcile_panics_total{controller="clustertriggerauthentication"} 0
+controller_runtime_reconcile_panics_total{controller="scaledjob"} 0
+controller_runtime_reconcile_panics_total{controller="scaledobject"} 0
+controller_runtime_reconcile_panics_total{controller="triggerauthentication"} 0
+# HELP controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller
+# TYPE controller_runtime_reconcile_time_seconds histogram
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.005"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.01"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.025"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.05"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.1"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.15"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.2"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.25"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.3"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.35"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.4"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.45"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.6"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.7"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.8"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="0.9"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="1"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="1.25"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="1.5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="1.75"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="2"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="2.5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="3"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="3.5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="4"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="4.5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="5"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="6"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="7"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="8"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="9"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="10"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="15"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="20"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="25"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="30"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="40"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="50"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="60"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="cert-rotator",le="+Inf"} 2
+controller_runtime_reconcile_time_seconds_sum{controller="cert-rotator"} 0.014531625
+controller_runtime_reconcile_time_seconds_count{controller="cert-rotator"} 2
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.005"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.01"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.025"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.05"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.1"} 0
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.15"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.2"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.25"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.3"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.35"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.4"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.45"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.6"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.7"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.8"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="0.9"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="1"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="1.25"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="1.5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="1.75"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="2"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="2.5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="3"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="3.5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="4"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="4.5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="5"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="6"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="7"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="8"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="9"} 1
+controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="10"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="15"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="20"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="25"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="30"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="40"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="50"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="60"} 1 +controller_runtime_reconcile_time_seconds_bucket{controller="scaledjob",le="+Inf"} 1 +controller_runtime_reconcile_time_seconds_sum{controller="scaledjob"} 0.123412833 +controller_runtime_reconcile_time_seconds_count{controller="scaledjob"} 1 +# HELP controller_runtime_reconcile_total Total number of reconciliations per controller +# TYPE controller_runtime_reconcile_total counter +controller_runtime_reconcile_total{controller="cert-rotator",result="error"} 0 +controller_runtime_reconcile_total{controller="cert-rotator",result="requeue"} 0 +controller_runtime_reconcile_total{controller="cert-rotator",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="cert-rotator",result="success"} 2 +controller_runtime_reconcile_total{controller="cloudeventsource",result="error"} 0 +controller_runtime_reconcile_total{controller="cloudeventsource",result="requeue"} 0 +controller_runtime_reconcile_total{controller="cloudeventsource",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="cloudeventsource",result="success"} 0 +controller_runtime_reconcile_total{controller="clustercloudeventsource",result="error"} 0 +controller_runtime_reconcile_total{controller="clustercloudeventsource",result="requeue"} 0 +controller_runtime_reconcile_total{controller="clustercloudeventsource",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="clustercloudeventsource",result="success"} 0 +controller_runtime_reconcile_total{controller="clustertriggerauthentication",result="error"} 0 +controller_runtime_reconcile_total{controller="clustertriggerauthentication",result="requeue"} 0 +controller_runtime_reconcile_total{controller="clustertriggerauthentication",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="clustertriggerauthentication",result="success"} 0 +controller_runtime_reconcile_total{controller="scaledjob",result="error"} 0 +controller_runtime_reconcile_total{controller="scaledjob",result="requeue"} 0 +controller_runtime_reconcile_total{controller="scaledjob",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="scaledjob",result="success"} 1 +controller_runtime_reconcile_total{controller="scaledobject",result="error"} 0 +controller_runtime_reconcile_total{controller="scaledobject",result="requeue"} 0 +controller_runtime_reconcile_total{controller="scaledobject",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="scaledobject",result="success"} 0 +controller_runtime_reconcile_total{controller="triggerauthentication",result="error"} 0 +controller_runtime_reconcile_total{controller="triggerauthentication",result="requeue"} 0 +controller_runtime_reconcile_total{controller="triggerauthentication",result="requeue_after"} 0 +controller_runtime_reconcile_total{controller="triggerauthentication",result="success"} 0 +# HELP 
controller_runtime_terminal_reconcile_errors_total Total number of terminal reconciliation errors per controller +# TYPE controller_runtime_terminal_reconcile_errors_total counter +controller_runtime_terminal_reconcile_errors_total{controller="cert-rotator"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="cloudeventsource"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="clustercloudeventsource"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="clustertriggerauthentication"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="scaledjob"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="scaledobject"} 0 +controller_runtime_terminal_reconcile_errors_total{controller="triggerauthentication"} 0 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.23.3"} 1 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.475087e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 1.4489752e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 3.2342016e+07 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 408000 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 885817 +# HELP keda_build_info Info metric, with static information about KEDA build like: version, git commit and Golang runtime info. +# TYPE keda_build_info gauge +keda_build_info{git_commit="5c52d032931b8ecf855d0c298f8d5e48937aecd7",goarch="arm64",goos="linux",goversion="go1.23.3",version="2.16.0"} 1 +# HELP keda_internal_scale_loop_latency DEPRECATED - will be removed in 2.16: use 'keda_internal_scale_loop_latency_seconds' instead. +# TYPE keda_internal_scale_loop_latency gauge +keda_internal_scale_loop_latency{namespace="default",resource="csv-processor",type="scaledjob"} 0 +# HELP keda_internal_scale_loop_latency_seconds Total deviation (in seconds) between the expected execution time and the actual execution time for the scaling loop. +# TYPE keda_internal_scale_loop_latency_seconds gauge +keda_internal_scale_loop_latency_seconds{namespace="default",resource="csv-processor",type="scaledjob"} 0.000345505 +# HELP keda_resource_registered_total Total number of KEDA custom resources per namespace for each custom resource type (CRD) registered. +# TYPE keda_resource_registered_total gauge +keda_resource_registered_total{namespace="default",type="scaled_job"} 1 +# HELP keda_resource_totals DEPRECATED - will be removed in 2.16: use 'keda_resource_registered_total' instead. 
+# TYPE keda_resource_totals gauge +keda_resource_totals{namespace="default",type="scaled_job"} 1 +# HELP keda_scaled_job_errors_total Number of scaled job errors +# TYPE keda_scaled_job_errors_total counter +keda_scaled_job_errors_total{namespace="default",scaledJob="csv-processor"} 0 +# HELP keda_scaler_active Indicates whether a scaler is active (1), or not (0). +# TYPE keda_scaler_active gauge +keda_scaler_active{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 0 +# HELP keda_scaler_detail_errors_total The total number of errors encountered for each scaler. +# TYPE keda_scaler_detail_errors_total counter +keda_scaler_detail_errors_total{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 0 +# HELP keda_scaler_errors DEPRECATED - will be removed in 2.16: use 'keda_scaler_detail_errors_total' instead. +# TYPE keda_scaler_errors counter +keda_scaler_errors{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 0 +# HELP keda_scaler_metrics_latency DEPRECATED - will be removed in 2.16: use 'keda_scaler_metrics_latency_seconds' instead. +# TYPE keda_scaler_metrics_latency gauge +keda_scaler_metrics_latency{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 1 +# HELP keda_scaler_metrics_latency_seconds The latency of retrieving current metric from each scaler, in seconds. +# TYPE keda_scaler_metrics_latency_seconds gauge +keda_scaler_metrics_latency_seconds{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 0.001407625 +# HELP keda_scaler_metrics_value The current value for each scaler's metric that would be used by the HPA in computing the target average. +# TYPE keda_scaler_metrics_value gauge +keda_scaler_metrics_value{metric="s0-redis-csvs-to-process",namespace="default",scaledObject="csv-processor",scaler="redisScaler",triggerIndex="0",type="scaledjob"} 0 +# HELP keda_trigger_registered_total Total number of triggers per trigger type registered. +# TYPE keda_trigger_registered_total gauge +keda_trigger_registered_total{type="redis"} 1 +# HELP keda_trigger_totals DEPRECATED - will be removed in 2.16: use 'keda_trigger_registered_total' instead. +# TYPE keda_trigger_totals gauge +keda_trigger_totals{type="redis"} 1 +# HELP leader_election_master_status Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. +# TYPE leader_election_master_status gauge +leader_election_master_status{name="operator.keda.sh"} 1 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.24 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 11 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. 
+# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.377685504e+09 +# HELP workqueue_adds_total Total number of adds handled by workqueue +# TYPE workqueue_adds_total counter +workqueue_adds_total{controller="cert-rotator",name="cert-rotator"} 2 +workqueue_adds_total{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_adds_total{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_adds_total{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_adds_total{controller="scaledjob",name="scaledjob"} 1 +workqueue_adds_total{controller="scaledobject",name="scaledobject"} 0 +workqueue_adds_total{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_depth Current depth of workqueue +# TYPE workqueue_depth gauge +workqueue_depth{controller="cert-rotator",name="cert-rotator"} 0 +workqueue_depth{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_depth{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_depth{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_depth{controller="scaledjob",name="scaledjob"} 0 +workqueue_depth{controller="scaledobject",name="scaledobject"} 0 +workqueue_depth{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running. +# TYPE workqueue_longest_running_processor_seconds gauge +workqueue_longest_running_processor_seconds{controller="cert-rotator",name="cert-rotator"} 0 +workqueue_longest_running_processor_seconds{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_longest_running_processor_seconds{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_longest_running_processor_seconds{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_longest_running_processor_seconds{controller="scaledjob",name="scaledjob"} 0 +workqueue_longest_running_processor_seconds{controller="scaledobject",name="scaledobject"} 0 +workqueue_longest_running_processor_seconds{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested +# TYPE workqueue_queue_duration_seconds histogram +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="9.999999999999999e-05"} 1 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.001"} 1 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.01"} 2 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.1"} 2 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1"} 2 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="10"} 2 
+workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="100"} 2 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1000"} 2 +workqueue_queue_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{controller="cert-rotator",name="cert-rotator"} 0.007450832 +workqueue_queue_duration_seconds_count{controller="cert-rotator",name="cert-rotator"} 2 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="10"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="100"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1000"} 0 +workqueue_queue_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_queue_duration_seconds_count{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="10"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="100"} 0 
+workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1000"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_queue_duration_seconds_count{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="10"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="100"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1000"} 0 +workqueue_queue_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_queue_duration_seconds_count{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="9.999999999999999e-06"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="9.999999999999999e-05"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.001"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.01"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="10"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="100"} 1 
+workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1000"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="+Inf"} 1 +workqueue_queue_duration_seconds_sum{controller="scaledjob",name="scaledjob"} 9.333e-06 +workqueue_queue_duration_seconds_count{controller="scaledjob",name="scaledjob"} 1 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="10"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="100"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1000"} 0 +workqueue_queue_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{controller="scaledobject",name="scaledobject"} 0 +workqueue_queue_duration_seconds_count{controller="scaledobject",name="scaledobject"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="10"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="100"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1000"} 0 +workqueue_queue_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{controller="triggerauthentication",name="triggerauthentication"} 0 
+workqueue_queue_duration_seconds_count{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_retries_total Total number of retries handled by workqueue +# TYPE workqueue_retries_total counter +workqueue_retries_total{controller="cert-rotator",name="cert-rotator"} 0 +workqueue_retries_total{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_retries_total{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_retries_total{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_retries_total{controller="scaledjob",name="scaledjob"} 0 +workqueue_retries_total{controller="scaledobject",name="scaledobject"} 0 +workqueue_retries_total{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases. +# TYPE workqueue_unfinished_work_seconds gauge +workqueue_unfinished_work_seconds{controller="cert-rotator",name="cert-rotator"} 0 +workqueue_unfinished_work_seconds{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_unfinished_work_seconds{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_unfinished_work_seconds{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_unfinished_work_seconds{controller="scaledjob",name="scaledjob"} 0 +workqueue_unfinished_work_seconds{controller="scaledobject",name="scaledobject"} 0 +workqueue_unfinished_work_seconds{controller="triggerauthentication",name="triggerauthentication"} 0 +# HELP workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes. 
+# TYPE workqueue_work_duration_seconds histogram +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="0.1"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="10"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="100"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="1000"} 2 +workqueue_work_duration_seconds_bucket{controller="cert-rotator",name="cert-rotator",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{controller="cert-rotator",name="cert-rotator"} 0.014563709000000001 +workqueue_work_duration_seconds_count{controller="cert-rotator",name="cert-rotator"} 2 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="10"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="100"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="1000"} 0 +workqueue_work_duration_seconds_bucket{controller="cloudeventsource",name="cloudeventsource",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_work_duration_seconds_count{controller="cloudeventsource",name="cloudeventsource"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1e-06"} 0 
+workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="10"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="100"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="1000"} 0 +workqueue_work_duration_seconds_bucket{controller="clustercloudeventsource",name="clustercloudeventsource",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_work_duration_seconds_count{controller="clustercloudeventsource",name="clustercloudeventsource"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="10"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="100"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="1000"} 0 +workqueue_work_duration_seconds_bucket{controller="clustertriggerauthentication",name="clustertriggerauthentication",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_work_duration_seconds_count{controller="clustertriggerauthentication",name="clustertriggerauthentication"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1"} 1 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="10"} 1 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="100"} 1 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="1000"} 1 +workqueue_work_duration_seconds_bucket{controller="scaledjob",name="scaledjob",le="+Inf"} 1 +workqueue_work_duration_seconds_sum{controller="scaledjob",name="scaledjob"} 0.123438292 +workqueue_work_duration_seconds_count{controller="scaledjob",name="scaledjob"} 1 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="10"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="100"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="1000"} 0 +workqueue_work_duration_seconds_bucket{controller="scaledobject",name="scaledobject",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{controller="scaledobject",name="scaledobject"} 0 +workqueue_work_duration_seconds_count{controller="scaledobject",name="scaledobject"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.001"} 0 
+workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="10"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="100"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="1000"} 0 +workqueue_work_duration_seconds_bucket{controller="triggerauthentication",name="triggerauthentication",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{controller="triggerauthentication",name="triggerauthentication"} 0 +workqueue_work_duration_seconds_count{controller="triggerauthentication",name="triggerauthentication"} 0 \ No newline at end of file diff --git a/keda/tests/kind/keda_install.yaml b/keda/tests/kind/keda_install.yaml new file mode 100644 index 0000000000000..4dd12e61907c6 --- /dev/null +++ b/keda/tests/kind/keda_install.yaml @@ -0,0 +1,11031 @@ +--- +# Source: keda/templates/manager/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: keda-operator + + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator + namespace: keda +automountServiceAccountToken: true +--- +# Source: keda/templates/metrics-server/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: keda-metrics-server + + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + annotations: + name: keda-metrics-server + namespace: keda +automountServiceAccountToken: true +--- +# Source: keda/templates/webhooks/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: keda-webhook + + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + annotations: + name: keda-webhook + namespace: keda +automountServiceAccountToken: true +--- +# Source: keda/templates/crds/crd-cloudeventsources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: cloudeventsources.eventing.keda.sh +spec: + group: eventing.keda.sh + names: + kind: CloudEventSource + listKind: CloudEventSourceList + plural: cloudeventsources + singular: cloudeventsource + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: CloudEventSource 
defines how a KEDA event will be sent to event + sink + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudEventSourceSpec defines the spec of CloudEventSource + properties: + authenticationRef: + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment + properties: + kind: + description: Kind of the resource being referred to. Defaults + to TriggerAuthentication. + type: string + name: + type: string + required: + - name + type: object + clusterName: + type: string + destination: + description: Destination defines the various ways to emit events + properties: + azureEventGridTopic: + properties: + endpoint: + type: string + required: + - endpoint + type: object + http: + properties: + uri: + type: string + required: + - uri + type: object + type: object + eventSubscription: + description: EventSubscription defines filters for events + properties: + excludedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 + type: string + type: array + includedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 + type: string + type: array + type: object + required: + - destination + type: object + status: + description: CloudEventSourceStatus defines the observed state of CloudEventSource + properties: + conditions: + description: Conditions an array representation to store multiple + Conditions + items: + description: Condition to store the condition state + properties: + message: + description: A human readable message indicating details about + the transition. 
+ type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/crds/crd-clustercloudeventsources.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: clustercloudeventsources.eventing.keda.sh +spec: + group: eventing.keda.sh + names: + kind: ClusterCloudEventSource + listKind: ClusterCloudEventSourceList + plural: clustercloudeventsources + singular: clustercloudeventsource + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudEventSourceSpec defines the spec of CloudEventSource + properties: + authenticationRef: + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment + properties: + kind: + description: Kind of the resource being referred to. Defaults + to TriggerAuthentication. 
+ type: string + name: + type: string + required: + - name + type: object + clusterName: + type: string + destination: + description: Destination defines the various ways to emit events + properties: + azureEventGridTopic: + properties: + endpoint: + type: string + required: + - endpoint + type: object + http: + properties: + uri: + type: string + required: + - uri + type: object + type: object + eventSubscription: + description: EventSubscription defines filters for events + properties: + excludedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 + type: string + type: array + includedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + - keda.scaledobject.removed.v1 + - keda.scaledjob.ready.v1 + - keda.scaledjob.failed.v1 + - keda.scaledjob.removed.v1 + - keda.authentication.triggerauthentication.created.v1 + - keda.authentication.triggerauthentication.updated.v1 + - keda.authentication.triggerauthentication.removed.v1 + - keda.authentication.clustertriggerauthentication.created.v1 + - keda.authentication.clustertriggerauthentication.updated.v1 + - keda.authentication.clustertriggerauthentication.removed.v1 + type: string + type: array + type: object + required: + - destination + type: object + status: + description: CloudEventSourceStatus defines the observed state of CloudEventSource + properties: + conditions: + description: Conditions an array representation to store multiple + Conditions + items: + description: Condition to store the condition state + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/crds/crd-clustertriggerauthentications.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: clustertriggerauthentications.keda.sh +spec: + group: keda.sh + names: + kind: ClusterTriggerAuthentication + listKind: ClusterTriggerAuthenticationList + plural: clustertriggerauthentications + shortNames: + - cta + - clustertriggerauth + singular: clustertriggerauthentication + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.podIdentity.provider + name: PodIdentity + type: string + - jsonPath: .spec.secretTargetRef[*].name + name: Secret + type: string + - jsonPath: .spec.env[*].name + name: Env + type: string + - jsonPath: .spec.hashiCorpVault.address + name: VaultAddress + type: string + - jsonPath: .status.scaledobjects + name: ScaledObjects + priority: 1 + type: string + - jsonPath: .status.scaledjobs + name: ScaledJobs + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterTriggerAuthentication defines how a trigger can authenticate + globally + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerAuthenticationSpec defines the various ways to authenticate + properties: + awsSecretManager: + description: AwsSecretManager is used to authenticate using AwsSecretManager + properties: + credentials: + properties: + accessKey: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + accessSecretKey: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + accessToken: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + required: + - accessKey + - accessSecretKey + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + region: + type: string + secrets: + items: + properties: + name: + type: string + parameter: + type: string + versionId: + type: string + versionStage: + type: string + required: + - name + - parameter + type: object + type: array + required: + - secrets + type: object + azureKeyVault: + description: AzureKeyVault is used to authenticate using Azure Key + Vault + properties: + cloud: + properties: + activeDirectoryEndpoint: + type: string + keyVaultResourceURL: + type: string + type: + type: string + required: + - type + type: object + credentials: + properties: + clientId: + type: string + clientSecret: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + tenantId: + type: string + required: + - clientId + - clientSecret + - tenantId + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. 
If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secrets: + items: + properties: + name: + type: string + parameter: + type: string + version: + type: string + required: + - name + - parameter + type: object + type: array + vaultUri: + type: string + required: + - secrets + - vaultUri + type: object + configMapTargetRef: + items: + description: AuthConfigMapTargetRef is used to authenticate using + a reference to a config map + properties: + key: + type: string + name: + type: string + parameter: + type: string + required: + - key + - name + - parameter + type: object + type: array + env: + items: + description: |- + AuthEnvironment is used to authenticate using environment variables + in the destination ScaleTarget spec + properties: + containerName: + type: string + name: + type: string + parameter: + type: string + required: + - name + - parameter + type: object + type: array + gcpSecretManager: + properties: + credentials: + properties: + clientSecret: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + required: + - clientSecret + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secrets: + items: + properties: + id: + type: string + parameter: + type: string + version: + type: string + required: + - id + - parameter + type: object + type: array + required: + - secrets + type: object + hashiCorpVault: + description: HashiCorpVault is used to authenticate using Hashicorp + Vault + properties: + address: + type: string + authentication: + description: VaultAuthentication contains the list of Hashicorp + Vault authentication methods + type: string + credential: + description: Credential defines the Hashicorp Vault credentials + depending on the authentication method + properties: + serviceAccount: + type: string + token: + type: string + type: object + mount: + type: string + namespace: + type: string + role: + type: string + secrets: + items: + description: VaultSecret defines the mapping between the path + of the secret in Vault to the parameter + properties: + key: + type: string + parameter: + type: string + path: + type: string + pkiData: + properties: + altNames: + type: string + commonName: + type: string + format: + type: string + ipSans: + type: string + otherSans: + type: string + ttl: + type: string + uriSans: + type: string + type: object + type: + description: VaultSecretType defines the type of vault secret + type: string + required: + - key + - parameter + - path + type: object + type: array + required: + - address + - authentication + - secrets + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to be + used during auto discovery, keda or the scaled workload. Mutually + exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default Azure + tenant id. If this is set, then the IdentityID must also be + set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
+ Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secretTargetRef: + items: + description: AuthSecretTargetRef is used to authenticate using a + reference to a secret + properties: + key: + type: string + name: + type: string + parameter: + type: string + required: + - key + - name + - parameter + type: object + type: array + type: object + status: + description: TriggerAuthenticationStatus defines the observed state of + TriggerAuthentication + properties: + scaledjobs: + type: string + scaledobjects: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/crds/crd-scaledjobs.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: scaledjobs.keda.sh +spec: + group: keda.sh + names: + kind: ScaledJob + listKind: ScaledJobList + plural: scaledjobs + shortNames: + - sj + singular: scaledjob + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.minReplicaCount + name: Min + type: integer + - jsonPath: .spec.maxReplicaCount + name: Max + type: integer + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + - jsonPath: .status.conditions[?(@.type=="Paused")].status + name: Paused + type: string + - jsonPath: .status.triggersTypes + name: Triggers + type: string + - jsonPath: .status.authenticationsTypes + name: Authentications + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScaledJob is the Schema for the scaledjobs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScaledJobSpec defines the desired state of ScaledJob + properties: + envSourceContainerName: + type: string + failedJobsHistoryLimit: + format: int32 + type: integer + jobTargetRef: + description: JobSpec describes what the job execution will look like. + properties: + activeDeadlineSeconds: + description: |- + Specifies the duration in seconds relative to the startTime that the job + may be continuously active before the system tries to terminate it; value + must be a positive integer. If a Job is suspended (at creation or through an + update), this timer will effectively be stopped and reset when the Job is + resumed again.
+ format: int64 + type: integer + backoffLimit: + description: |- + Specifies the number of retries before marking this job failed. + Defaults to 6 + format: int32 + type: integer + backoffLimitPerIndex: + description: |- + Specifies the limit for the number of retries within an + index before marking this index as failed. When enabled the number of + failures per index is kept in the pod's + batch.kubernetes.io/job-index-failure-count annotation. It can only + be set when Job's completionMode=Indexed, and the Pod's restart + policy is Never. The field is immutable. + This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + feature gate is enabled (enabled by default). + format: int32 + type: integer + completionMode: + description: |- + completionMode specifies how Pod completions are tracked. It can be + `NonIndexed` (default) or `Indexed`. + + + `NonIndexed` means that the Job is considered complete when there have + been .spec.completions successfully completed Pods. Each Pod completion is + homologous to each other. + + + `Indexed` means that the Pods of a + Job get an associated completion index from 0 to (.spec.completions - 1), + available in the annotation batch.kubernetes.io/job-completion-index. + The Job is considered complete when there is one successfully completed Pod + for each index. + When value is `Indexed`, .spec.completions must be specified and + `.spec.parallelism` must be less than or equal to 10^5. + In addition, the Pod name takes the form + `$(job-name)-$(index)-$(random-string)`, + the Pod hostname takes the form `$(job-name)-$(index)`. + + + More completion modes can be added in the future. + If the Job controller observes a mode that it doesn't recognize, which + is possible during upgrades due to version skew, the controller + skips updates for the Job. + type: string + completions: + description: |- + Specifies the desired number of successfully finished pods the + job should be run with. Setting to null means that the success of any + pod signals the success of all pods, and allows parallelism to have any positive + value. Setting to 1 means that parallelism is limited to 1 and the success of that + pod signals the success of the job. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + format: int32 + type: integer + manualSelector: + description: |- + manualSelector controls generation of pod labels and pod selectors. + Leave `manualSelector` unset unless you are certain what you are doing. + When false or unset, the system picks labels unique to this job + and appends those labels to the pod template. When true, + the user is responsible for picking unique labels and specifying + the selector. Failure to pick a unique label may cause this + and other jobs to not function correctly. However, you may see + `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + API. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector + type: boolean + maxFailedIndexes: + description: |- + Specifies the maximal number of failed indexes before marking the Job as + failed, when backoffLimitPerIndex is set. Once the number of failed + indexes exceeds this number the entire Job is marked as Failed and its + execution is terminated. When left as null the job continues execution of + all of its indexes and is marked with the `Complete` Job condition. + It can only be specified when backoffLimitPerIndex is set.
+ It can be null or up to completions. It is required and must be + less than or equal to 10^4 when completions is greater than 10^5. + This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + feature gate is enabled (enabled by default). + format: int32 + type: integer + parallelism: + description: |- + Specifies the maximum desired number of pods the job should + run at any given time. The actual number of pods running in steady state will + be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + i.e. when the work left to do is less than max parallelism. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + format: int32 + type: integer + podFailurePolicy: + description: |- + Specifies the policy of handling failed pods. In particular, it allows you to + specify the set of actions and conditions which need to be + satisfied to take the associated action. + If empty, the default behaviour applies - the counter of failed pods, + represented by the job's .status.failed field, is incremented and it is + checked against the backoffLimit. This field cannot be used in combination + with restartPolicy=OnFailure. + + + This field is beta-level. It can be used when the `JobPodFailurePolicy` + feature gate is enabled (enabled by default). + properties: + rules: + description: |- + A list of pod failure policy rules. The rules are evaluated in order. + Once a rule matches a Pod failure, the remaining rules are ignored. + When no rule matches the Pod failure, the default handling applies - the + counter of pod failures is incremented and it is checked against + the backoffLimit. At most 20 elements are allowed. + items: + description: |- + PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. + One of onExitCodes and onPodConditions, but not both, can be used in each rule. + properties: + action: + description: |- + Specifies the action taken on a pod failure when the requirements are satisfied. + Possible values are: + + + - FailJob: indicates that the pod's job is marked as Failed and all + running pods are terminated. + - FailIndex: indicates that the pod's index is marked as Failed and will + not be restarted. + This value is beta-level. It can be used when the + `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + - Ignore: indicates that the counter towards the .backoffLimit is not + incremented and a replacement pod is created. + - Count: indicates that the pod is handled in the default way - the + counter towards the .backoffLimit is incremented. + Additional values may be added in the future. Clients should + react to an unknown action by skipping the rule. + type: string + onExitCodes: + description: Represents the requirement on the container + exit codes. + properties: + containerName: + description: |- + Restricts the check for exit codes to the container with the + specified name. When null, the rule applies to all containers. + When specified, it should match one of the container or initContainer + names in the pod template. + type: string + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Containers completed with success (exit code 0) are + excluded from the requirement check.
Possible values are: + + + - In: the requirement is satisfied if at least one container exit code + (might be multiple if there are multiple containers not restricted + by the 'containerName' field) is in the set of specified values. + - NotIn: the requirement is satisfied if at least one container exit code + (might be multiple if there are multiple containers not restricted + by the 'containerName' field) is not in the set of specified values. + Additional values may be added in the future. Clients should + react to an unknown operator by assuming the requirement is not satisfied. + type: string + values: + description: |- + Specifies the set of values. Each returned container exit code (might be + multiple in case of multiple containers) is checked against this set of + values with respect to the operator. The list of values must be ordered + and must not contain duplicates. Value '0' cannot be used for the In operator. + At least one element is required. At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + - values + type: object + onPodConditions: + description: |- + Represents the requirement on the pod conditions. The requirement is represented + as a list of pod condition patterns. The requirement is satisfied if at + least one pattern matches an actual pod condition. At most 20 elements are allowed. + items: + description: |- + PodFailurePolicyOnPodConditionsPattern describes a pattern for matching + an actual pod condition type. + properties: + status: + description: |- + Specifies the required Pod condition status. To match a pod condition + it is required that the specified status equals the pod condition status. + Defaults to True. + type: string + type: + description: |- + Specifies the required Pod condition type. To match a pod condition + it is required that the specified type equals the pod condition type. + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + required: + - rules + type: object + podReplacementPolicy: + description: |- + podReplacementPolicy specifies when to create replacement Pods. + Possible values are: + - TerminatingOrFailed means that we recreate pods + when they are terminating (has a metadata.deletionTimestamp) or failed. + - Failed means to wait until a previously created Pod is fully terminated (has phase + Failed or Succeeded) before creating a replacement Pod. + + + When using podFailurePolicy, Failed is the only allowed value. + TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + This is on by default. + type: string + selector: + description: |- + A label query over pods that should match the pod count. + Normally, the system sets this field for you. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to.
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + suspend: + description: |- + suspend specifies whether the Job controller should create Pods or not. If + a Job is created with suspend set to true, no Pods are created by the Job + controller. If a Job is suspended after creation (i.e. the flag goes from + false to true), the Job controller will delete all active Pods associated + with this Job. Users must design their workload to gracefully handle this. + Suspending a Job will reset the StartTime field of the Job, effectively + resetting the ActiveDeadlineSeconds timer too. Defaults to false. + type: boolean + template: + description: |- + Describes the pod that will be created when executing a job. + The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). 
A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in + the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to.
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to.
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values.
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you + want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. 
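# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# The env schema above supports literal values, downward-API fieldRef, and
# secretKeyRef sources, and args/command expand $(VAR_NAME) from the
# container's environment. A sketch; the Secret "db-credentials" is hypothetical:
#
#   env:
#     - name: LOG_LEVEL
#       value: "info"
#     - name: POD_NAME
#       valueFrom:
#         fieldRef:
#           fieldPath: metadata.name
#     - name: DB_PASSWORD
#       valueFrom:
#         secretKeyRef:
#           name: db-credentials
#           key: password
#   args:
#     - "--log-level=$(LOG_LEVEL)"    # expanded from env
#     - "--show=$$(LOG_LEVEL)"        # $$ escapes: yields the literal "$(LOG_LEVEL)"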
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a + C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. 
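# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# envFrom (above) imports every key of a ConfigMap or Secret as environment
# variables, optionally prefixed; on duplicate keys the later source wins.
# A sketch with hypothetical ConfigMap/Secret names:
#
#   envFrom:
#     - prefix: CFG_                  # must be a C_IDENTIFIER
#       configMapRef:
#         name: app-config
#     - secretRef:
#         name: app-secrets
#         optional: true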
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
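# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# Lifecycle hooks as described above: postStart blocks further container
# management until it completes, and preStop runs before termination while
# the grace-period countdown is already underway. The "sleep" handler is
# comparatively new (PodLifecycleSleepAction feature gate), so treat this
# strictly as a sketch:
#
#   lifecycle:
#     postStart:
#       exec:
#         command: ["/bin/sh", "-c", "echo started > /tmp/booted"]
#     preStop:
#       sleep:
#         seconds: 5                  # let the load balancer drain first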
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. 
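# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# A liveness probe using the gRPC action from the schema above; with the
# defaults shown (failureThreshold 3, period 10s) the container is restarted
# after roughly 30s of consecutive failures. Port 9090 is hypothetical:
#
#   livenessProbe:
#     grpc:
#       port: 9090                    # service omitted -> gRPC default behavior
#     failureThreshold: 3
#     periodSeconds: 10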
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. 
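# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# Per the ports schema above, declaring a port is informational (omitting it
# does not prevent exposure), but a named port can then be referenced by
# Services and probes:
#
#   ports:
#     - name: http                    # IANA_SVC_NAME, unique within the pod
#       containerPort: 8080
#       protocol: TCP                 # the default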
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
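# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# A readiness probe per the schema above; failure removes the pod from
# Service endpoints without restarting it. It may target a named port:
#
#   readinessProbe:
#     httpGet:
#       path: /ready                  # hypothetical endpoint
#       port: http                    # refers to the named containerPort
#     initialDelaySeconds: 5
#     periodSeconds: 10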
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. 
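# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# resizePolicy (above) controls in-place pod resource resizing, which is
# feature-gated (InPlacePodVerticalScaling) at the time of writing; a sketch:
#
#   resizePolicy:
#     - resourceName: cpu
#       restartPolicy: NotRequired    # resize without restarting
#     - resourceName: memory
#       restartPolicy: RestartContainer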
It can only be set for containers. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. 
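# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# Tying together the resources schema and the restartPolicy note above: an
# init container with restartPolicy "Always" behaves as a sidecar, starting
# in init order but running for the pod's lifetime (SidecarContainers
# feature gate; the name and image here are hypothetical):
#
#   initContainers:
#     - name: log-forwarder
#       image: example.com/fluent-bit:2.2
#       restartPolicy: Always
#       resources:
#         requests:
#           cpu: 50m
#           memory: 64Mi
#         limits:
#           cpu: 100m
#           memory: 128Mi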
+ Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
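# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# A commonly used hardened container securityContext assembled from the
# fields above (the UID 10001 is an arbitrary non-root example):
#
#   securityContext:
#     allowPrivilegeEscalation: false
#     privileged: false
#     readOnlyRootFilesystem: true
#     runAsNonRoot: true
#     runAsUser: 10001
#     capabilities:
#       drop: ["ALL"]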
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
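# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# The seccompProfile types above in use; "Localhost" additionally requires
# localhostProfile, a path relative to the kubelet's seccomp directory:
#
#   securityContext:
#     seccompProfile:
#       type: RuntimeDefault          # or: type: Localhost
#                                     #     localhostProfile: profiles/audit.json  (hypothetical)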
+ items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block + devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside + of the container that the device will be + mapped to. + type: string + name: + description: name must match the name of a + persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. 
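# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# A startup probe per the schema above: liveness/readiness are held off
# until it succeeds, so failureThreshold x periodSeconds bounds the startup
# budget (here 30 x 10s = 300s). Path and port are hypothetical:
#
#   startupProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     failureThreshold: 30
#     periodSeconds: 10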
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a + Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. 
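# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# A volumeMount using subPathExpr from the schema above, which expands
# $(VAR_NAME) from the container's environment to give each pod its own
# subdirectory ("data" must match a volume declared in the pod):
#
#   env:
#     - name: POD_NAME
#       valueFrom:
#         fieldRef:
#           fieldPath: metadata.name
#   volumeMounts:
#     - name: data
#       mountPath: /var/log/app
#       subPathExpr: $(POD_NAME)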
In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
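# --- editor's aside: illustrative sketch, not part of the generated CRD ---
# Ephemeral containers (above) can only be added through the pod's
# ephemeralcontainers subresource, typically via "kubectl debug". Roughly:
#
#   kubectl debug -it my-pod --image=busybox:1.36 --target=app
#
# which amounts to appending an entry like this (names hypothetical):
#
#   ephemeralContainers:
#     - name: debugger
#       image: busybox:1.36
#       command: ["sh"]
#       stdin: true
#       tty: true
#       targetContainerName: app      # joins the target's namespaces where the runtime supports it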
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a + C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' 
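+ # Both hooks above take the same LifecycleHandler shape: exactly one of exec,
+ # httpGet, sleep, or the deprecated tcpSocket. A sketch pairing a postStart
+ # exec with a preStop sleep (hypothetical command; sleep assumes a cluster
+ # that ships the PodLifecycleSleepAction feature):
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started > /tmp/ready"]
+ #     preStop:
+ #       sleep:
+ #         seconds: 10          # let endpoints drain before SIGTERM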
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
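+ # This livenessProbe is the shared core/v1 Probe shape; as its description
+ # says, it is rejected on ephemeral containers, but the same fields apply to
+ # regular containers. A minimal httpGet sketch with hypothetical values:
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10
+ #     failureThreshold: 3      # ~30s of failures before a restart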
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral + containers. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. 
+ This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action to take. 
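+ # The securityContext above mirrors core/v1 SecurityContext; a commonly used
+ # hardened combination, sketched with hypothetical values:
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     runAsUser: 1000
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     seccompProfile:
+ #       type: RuntimeDefault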
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
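+ # For gRPC servers the same Probe shape can target the standard gRPC health
+ # checking protocol instead of HTTP. A sketch with hypothetical values (omit
+ # service to check the server's overall health):
+ #   startupProbe:
+ #     grpc:
+ #       port: 9090
+ #     failureThreshold: 30
+ #     periodSeconds: 10        # tolerates up to ~300s of slow startup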
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. 
+ Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block + devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside + of the container that the device will be + mapped to. + type: string + name: + description: name must match the name of a + persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a + Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. 
+ If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you + want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. 
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
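+ # fieldRef and resourceFieldRef are the downward API: the first reads pod
+ # metadata, the second the container's own resource numbers. A sketch with
+ # hypothetical variable names:
+ #   env:
+ #   - name: POD_NAME
+ #     valueFrom:
+ #       fieldRef:
+ #         fieldPath: metadata.name
+ #   - name: MEM_LIMIT_MB
+ #     valueFrom:
+ #       resourceFieldRef:
+ #         resource: limits.memory
+ #         divisor: 1Mi         # expose the limit as a plain MiB count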
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a + C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
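+ # The $(VAR_NAME) rules quoted for args/command above expand against the
+ # container's environment before exec, and $$ escapes a literal reference.
+ # Sketch (hypothetical variable):
+ #   env:
+ #   - name: GREETING
+ #     value: hello
+ #   command: ["/bin/echo"]
+ #   args: ["$(GREETING)", "$$(GREETING)"]   # prints: hello $(GREETING)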
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to + take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration + that the container should sleep before + being terminated. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
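+ # exec handlers and probes run their command directly, with no shell, so
+ # pipes or redirects need an explicit shell invocation. Sketch (hypothetical
+ # process name):
+ #   exec:
+ #     command:
+ #     - /bin/sh
+ #     - -c
+ #     - "pgrep myserver | grep -q ."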
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
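+ # port is int-or-string throughout these probe schemas: a number, or the
+ # name of a declared containerPort. A named-port sketch (hypothetical name):
+ #   ports:
+ #   - name: redis
+ #     containerPort: 6379
+ #   livenessProbe:
+ #     tcpSocket:
+ #       port: redis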
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX + capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
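+            # Illustration (assumed values; not emitted by the CRD generator): the
+            # resources and container securityContext fields above could be set as:
+            #   resources:
+            #     requests: { cpu: 250m, memory: 64Mi }
+            #     limits: { cpu: 500m, memory: 128Mi }
+            #   securityContext:
+            #     runAsNonRoot: true
+            #     runAsUser: 1000
+            #     allowPrivilegeEscalation: false
+            #     capabilities: { drop: ["ALL"] }
+            #     seccompProfile: { type: RuntimeDefault }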
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
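+            # Illustration (assumed values; not emitted by the CRD generator): per the
+            # startupProbe description above, a slow-starting container gets up to
+            # failureThreshold x periodSeconds (here 30 x 10 = 300s) to initialize:
+            #   startupProbe:
+            #     httpGet:
+            #       path: /ready
+            #       port: 8080
+            #     failureThreshold: 30
+            #     periodSeconds: 10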
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block + devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside + of the container that the device will be + mapped to. + type: string + name: + description: name must match the name of a + persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a + Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. 
+ One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference + to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition + in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the + ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. 
+ Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to a Pod + to guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. 
+ If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. 
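+          # Illustration (assumed values; not emitted by the CRD generator): a
+          # pod-level securityContext using the fields above; note the descriptions
+          # state that container-level settings take precedence where both are set:
+          #   securityContext:
+          #     runAsNonRoot: true
+          #     fsGroup: 2000
+          #     seccompProfile: { type: RuntimeDefault }
+          #     sysctls:
+          #       - name: net.ipv4.ip_unprivileged_port_start
+          #         value: "1024"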
+ items: + description: Sysctl defines a kernel parameter to + be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. 
The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
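+          # Illustration (assumed taint key/value; not emitted by the CRD generator):
+          # a toleration matching the schema above, tolerating a NoSchedule taint:
+          #   tolerations:
+          #     - key: "dedicated"
+          #       operator: "Equal"
+          #       value: "monitoring"
+          #       effect: "NoSchedule"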
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data + Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk + in the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File + Service mount on the host and bind mount to the + pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
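+          # Illustration (assumed names; not emitted by the CRD generator): a volume
+          # entry using the azureDisk source described above:
+          #   volumes:
+          #     - name: data
+          #       azureDisk:
+          #         diskName: example-data-disk
+          #         diskURI: "<URI of the data disk in blob storage>"
+          #         fsType: ext4
+          #         readOnly: true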
+ type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on + the host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the + mounted root, rather than the full Ceph tree, + default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API
+ about the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API
+ volume file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field
+ of the pod: only annotations, labels,
+ name and namespace are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created.
+ Must not be absolute or contain the
+ ''..'' path. Must be utf-8 encoded.
+ The first item of the relative path
+ must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to
+ select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding
+ reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and
+ then exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun
+ number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver
+ to use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field
+ holds extra command options if any.'
+ type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume + attached to a kubelet's host machine. This depends + on the Flocker control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the + dataset. This is unique identifier of a Flocker + dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. 
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for
+ the specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified
+ Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun
+ number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List.
The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets + host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx + volume attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a + Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path + is the relative path name + of the file to be created. + Must not be absolute or contain + the ''..'' path. Must be utf-8 + encoded. The first item of + the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about + the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key + to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string
+ optional:
+ description: optional field specify
+ whether the Secret or its key must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to
+ project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount
+ on the host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of + the ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. 
+ Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere + volume attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile ID + associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + ttlSecondsAfterFinished: + description: |- + ttlSecondsAfterFinished limits the lifetime of a Job that has finished + execution (either Complete or Failed). If this field is set, + ttlSecondsAfterFinished after the Job finishes, it is eligible to be + automatically deleted. When the Job is being deleted, its lifecycle + guarantees (e.g. finalizers) will be honored. If this field is unset, + the Job won't be automatically deleted. If this field is set to zero, + the Job becomes eligible to be deleted immediately after it finishes. + format: int32 + type: integer + required: + - template + type: object + maxReplicaCount: + format: int32 + type: integer + minReplicaCount: + format: int32 + type: integer + pollingInterval: + format: int32 + type: integer + rollout: + description: Rollout defines the strategy for job rollouts + properties: + propagationPolicy: + type: string + strategy: + type: string + type: object + rolloutStrategy: + type: string + scalingStrategy: + description: ScalingStrategy defines the strategy of Scaling + properties: + customScalingQueueLengthDeduction: + format: int32 + type: integer + customScalingRunningJobPercentage: + type: string + multipleScalersCalculation: + type: string + pendingPodConditions: + items: + type: string + type: array + strategy: + type: string + type: object + successfulJobsHistoryLimit: + format: int32 + type: integer + triggers: + items: + description: ScaleTriggers reference the scaler that will be used + properties: + authenticationRef: + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment + properties: + kind: + description: Kind of the resource being referred to. Defaults + to TriggerAuthentication. 
+ type: string + name: + type: string + required: + - name + type: object + metadata: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + useCachedMetrics: + type: boolean + required: + - metadata + - type + type: object + type: array + required: + - jobTargetRef + - triggers + type: object + status: + description: ScaledJobStatus defines the observed state of ScaledJob + properties: + Paused: + type: string + conditions: + description: Conditions an array representation to store multiple + Conditions + items: + description: Condition to store the condition state + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + lastActiveTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/crds/crd-scaledobjects.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: scaledobjects.keda.sh +spec: + group: keda.sh + names: + kind: ScaledObject + listKind: ScaledObjectList + plural: scaledobjects + shortNames: + - so + singular: scaledobject + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.scaleTargetKind + name: ScaleTargetKind + type: string + - jsonPath: .spec.scaleTargetRef.name + name: ScaleTargetName + type: string + - jsonPath: .spec.minReplicaCount + name: Min + type: integer + - jsonPath: .spec.maxReplicaCount + name: Max + type: integer + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + - jsonPath: .status.conditions[?(@.type=="Fallback")].status + name: Fallback + type: string + - jsonPath: .status.conditions[?(@.type=="Paused")].status + name: Paused + type: string + - jsonPath: .status.triggersTypes + name: Triggers + type: string + - jsonPath: .status.authenticationsTypes + name: Authentications + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScaledObject is a specification for a ScaledObject resource + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScaledObjectSpec is the spec for a ScaledObject resource + properties: + advanced: + description: AdvancedConfig specifies advance scaling options + properties: + horizontalPodAutoscalerConfig: + description: HorizontalPodAutoscalerConfig specifies horizontal + scale config + properties: + behavior: + description: |- + HorizontalPodAutoscalerBehavior configures the scaling behavior of the target + in both Up and Down directions (scaleUp and scaleDown fields respectively). + properties: + scaleDown: + description: |- + scaleDown is scaling policy for scaling Down. + If not set, the default value is to allow to scale down to minReplicas pods, with a + 300 second stabilization window (i.e., the highest recommendation for + the last 300sec is used). + properties: + policies: + description: |- + policies is a list of potential scaling polices which can be used during scaling. + At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + items: + description: HPAScalingPolicy is a single policy + which must hold true for a specified past interval. + properties: + periodSeconds: + description: |- + periodSeconds specifies the window of time for which the policy should hold true. + PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + format: int32 + type: integer + type: + description: type is used to specify the scaling + policy. + type: string + value: + description: |- + value contains the amount of change which is permitted by the policy. + It must be greater than zero + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + description: |- + selectPolicy is used to specify which policy should be used. + If not set, the default value Max is used. + type: string + stabilizationWindowSeconds: + description: |- + stabilizationWindowSeconds is the number of seconds for which past recommendations should be + considered while scaling up or scaling down. + StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + If not set, use the default values: + - For scale up: 0 (i.e. no stabilization is done). + - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + format: int32 + maximum: 3600 + minimum: 0 + type: integer + type: object + scaleUp: + description: |- + scaleUp is scaling policy for scaling Up. + If not set, the default value is the higher of: + * increase no more than 4 pods per 60 seconds + * double the number of pods per 60 seconds + No stabilization is used. + properties: + policies: + description: |- + policies is a list of potential scaling polices which can be used during scaling. + At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + items: + description: HPAScalingPolicy is a single policy + which must hold true for a specified past interval. + properties: + periodSeconds: + description: |- + periodSeconds specifies the window of time for which the policy should hold true. + PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + format: int32 + type: integer + type: + description: type is used to specify the scaling + policy. 
+ type: string + value: + description: |- + value contains the amount of change which is permitted by the policy. + It must be greater than zero + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + description: |- + selectPolicy is used to specify which policy should be used. + If not set, the default value Max is used. + type: string + stabilizationWindowSeconds: + description: |- + stabilizationWindowSeconds is the number of seconds for which past recommendations should be + considered while scaling up or scaling down. + StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + If not set, use the default values: + - For scale up: 0 (i.e. no stabilization is done). + - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + format: int32 + maximum: 3600 + minimum: 0 + type: integer + type: object + type: object + name: + type: string + type: object + restoreToOriginalReplicaCount: + type: boolean + scalingModifiers: + description: ScalingModifiers describes advanced scaling logic + options like formula + properties: + activationTarget: + type: string + formula: + type: string + metricType: + description: |- + MetricTargetType specifies the type of metric being targeted, and should be either + "Value", "AverageValue", or "Utilization" + type: string + target: + type: string + type: object + type: object + cooldownPeriod: + format: int32 + type: integer + fallback: + description: Fallback is the spec for fallback options + properties: + failureThreshold: + format: int32 + type: integer + replicas: + format: int32 + type: integer + required: + - failureThreshold + - replicas + type: object + idleReplicaCount: + format: int32 + type: integer + initialCooldownPeriod: + format: int32 + type: integer + maxReplicaCount: + format: int32 + type: integer + minReplicaCount: + format: int32 + type: integer + pollingInterval: + format: int32 + type: integer + scaleTargetRef: + description: ScaleTarget holds the reference to the scale target Object + properties: + apiVersion: + type: string + envSourceContainerName: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + triggers: + items: + description: ScaleTriggers reference the scaler that will be used + properties: + authenticationRef: + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment + properties: + kind: + description: Kind of the resource being referred to. Defaults + to TriggerAuthentication. 
+ type: string + name: + type: string + required: + - name + type: object + metadata: + additionalProperties: + type: string + type: object + metricType: + description: |- + MetricTargetType specifies the type of metric being targeted, and should be either + "Value", "AverageValue", or "Utilization" + type: string + name: + type: string + type: + type: string + useCachedMetrics: + type: boolean + required: + - metadata + - type + type: object + type: array + required: + - scaleTargetRef + - triggers + type: object + status: + description: ScaledObjectStatus is the status for a ScaledObject resource + properties: + compositeScalerName: + type: string + conditions: + description: Conditions an array representation to store multiple + Conditions + items: + description: Condition to store the condition state + properties: + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition + type: string + required: + - status + - type + type: object + type: array + externalMetricNames: + items: + type: string + type: array + health: + additionalProperties: + description: HealthStatus is the status for a ScaledObject's health + properties: + numberOfFailures: + format: int32 + type: integer + status: + description: HealthStatusType is an indication of whether the + health status is happy or failing + type: string + type: object + type: object + hpaName: + type: string + lastActiveTime: + format: date-time + type: string + originalReplicaCount: + format: int32 + type: integer + pausedReplicaCount: + format: int32 + type: integer + resourceMetricNames: + items: + type: string + type: array + scaleTargetGVKR: + description: GroupVersionKindResource provides unified structure for + schema.GroupVersionKind and Resource + properties: + group: + type: string + kind: + type: string + resource: + type: string + version: + type: string + required: + - group + - kind + - resource + - version + type: object + scaleTargetKind: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/crds/crd-triggerauthentications.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + name: triggerauthentications.keda.sh +spec: + group: keda.sh + names: + kind: TriggerAuthentication + listKind: TriggerAuthenticationList + plural: triggerauthentications + shortNames: + - ta + - triggerauth + singular: triggerauthentication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.podIdentity.provider + name: PodIdentity + type: string + - jsonPath: .spec.secretTargetRef[*].name + name: Secret + type: string + - jsonPath: .spec.env[*].name + name: Env + type: string + - jsonPath: .spec.hashiCorpVault.address + name: VaultAddress + type: string + - jsonPath: .status.scaledobjects + name: ScaledObjects + priority: 1 + type: string + - jsonPath: .status.scaledjobs + name: ScaledJobs + priority: 1 + type: string + name: 
v1alpha1 + schema: + openAPIV3Schema: + description: TriggerAuthentication defines how a trigger can authenticate + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerAuthenticationSpec defines the various ways to authenticate + properties: + awsSecretManager: + description: AwsSecretManager is used to authenticate using AwsSecretManager + properties: + credentials: + properties: + accessKey: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + accessSecretKey: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + accessToken: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + required: + - accessKey + - accessSecretKey + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + region: + type: string + secrets: + items: + properties: + name: + type: string + parameter: + type: string + versionId: + type: string + versionStage: + type: string + required: + - name + - parameter + type: object + type: array + required: + - secrets + type: object + azureKeyVault: + description: AzureKeyVault is used to authenticate using Azure Key + Vault + properties: + cloud: + properties: + activeDirectoryEndpoint: + type: string + keyVaultResourceURL: + type: string + type: + type: string + required: + - type + type: object + credentials: + properties: + clientId: + type: string + clientSecret: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + tenantId: + type: string + required: + - clientId + - clientSecret + - tenantId + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secrets: + items: + properties: + name: + type: string + parameter: + type: string + version: + type: string + required: + - name + - parameter + type: object + type: array + vaultUri: + type: string + required: + - secrets + - vaultUri + type: object + configMapTargetRef: + items: + description: AuthConfigMapTargetRef is used to authenticate using + a reference to a config map + properties: + key: + type: string + name: + type: string + parameter: + type: string + required: + - key + - name + - parameter + type: object + type: array + env: + items: + description: |- + AuthEnvironment is used to authenticate using environment variables + in the destination ScaleTarget spec + properties: + containerName: + type: string + name: + type: string + parameter: + type: string + required: + - name + - parameter + type: object + type: array + gcpSecretManager: + properties: + credentials: + properties: + clientSecret: + properties: + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - secretKeyRef + type: object + required: + - valueFrom + type: object + required: + - clientSecret + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default + Azure tenant id. If this is set, then the IdentityID must + also be set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secrets: + items: + properties: + id: + type: string + parameter: + type: string + version: + type: string + required: + - id + - parameter + type: object + type: array + required: + - secrets + type: object + hashiCorpVault: + description: HashiCorpVault is used to authenticate using Hashicorp + Vault + properties: + address: + type: string + authentication: + description: VaultAuthentication contains the list of Hashicorp + Vault authentication methods + type: string + credential: + description: Credential defines the Hashicorp Vault credentials + depending on the authentication method + properties: + serviceAccount: + type: string + token: + type: string + type: object + mount: + type: string + namespace: + type: string + role: + type: string + secrets: + items: + description: VaultSecret defines the mapping between the path + of the secret in Vault to the parameter + properties: + key: + type: string + parameter: + type: string + path: + type: string + pkiData: + properties: + altNames: + type: string + commonName: + type: string + format: + type: string + ipSans: + type: string + otherSans: + type: string + ttl: + type: string + uriSans: + type: string + type: object + type: + description: VaultSecretType defines the type of vault secret + type: string + required: + - key + - parameter + - path + type: object + type: array + required: + - address + - authentication + - secrets + type: object + podIdentity: + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism + properties: + identityAuthorityHost: + description: Set identityAuthorityHost to override the default + Azure authority host. If this is set, then the IdentityTenantID + must also be set + type: string + identityId: + type: string + identityOwner: + description: IdentityOwner configures which identity has to be + used during auto discovery, keda or the scaled workload. Mutually + exclusive with roleArn + enum: + - keda + - workload + type: string + identityTenantId: + description: Set identityTenantId to override the default Azure + tenant id. If this is set, then the IdentityID must also be + set + type: string + provider: + description: PodIdentityProvider contains the list of providers + enum: + - azure-workload + - gcp + - aws + - aws-eks + - none + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner + type: string + required: + - provider + type: object + secretTargetRef: + items: + description: AuthSecretTargetRef is used to authenticate using a + reference to a secret + properties: + key: + type: string + name: + type: string + parameter: + type: string + required: + - key + - name + - parameter + type: object + type: array + type: object + status: + description: TriggerAuthenticationStatus defines the observed state of + TriggerAuthentication + properties: + scaledjobs: + type: string + scaledobjects: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: keda/templates/manager/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator +rules: +- apiGroups: + - "" + resources: + - configmaps + - configmaps/status + - limitranges + - pods + - services + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "*" + resources: + - "*/scale" + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "*" + resources: + - "*" + verbs: + - get +- apiGroups: + - apps + resources: + - deployments/scale + - statefulsets/scale + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - eventing.keda.sh + resources: + - cloudeventsources + - cloudeventsources/status + - clustercloudeventsources + - clustercloudeventsources/status + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - keda.sh + resources: + - scaledjobs + - scaledjobs/finalizers + - scaledjobs/status + - scaledobjects + - scaledobjects/finalizers + - scaledobjects/status + - triggerauthentications + - triggerauthentications/status + verbs: + - get + - list + - patch + - update + - watch +--- +# Source: keda/templates/manager/minimal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: keda-operator-minimal-cluster-role + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-minimal-cluster-role +rules: +- apiGroups: + - keda.sh + resources: + - clustertriggerauthentications + - clustertriggerauthentications/status + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - list + - patch + - update + - watch +--- +# 
Source: keda/templates/metrics-server/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: keda-operator-external-metrics-reader + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-external-metrics-reader +rules: +- apiGroups: + - external.metrics.k8s.io + resources: + - 'externalmetrics' + verbs: + - 'get' +--- +# Source: keda/templates/webhooks/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: keda-operator-webhook + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-webhook +rules: +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +- apiGroups: + - keda.sh + resources: + - scaledobjects + verbs: + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - limitranges + verbs: + - list +--- +# Source: keda/templates/manager/clusterrolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: keda-operator +subjects: +- kind: ServiceAccount + name: keda-operator + namespace: keda +--- +# Source: keda/templates/manager/minimal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-minimal + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-minimal +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: keda-operator-minimal-cluster-role +subjects: +- kind: ServiceAccount + name: keda-operator + namespace: keda +--- +# Source: keda/templates/metrics-server/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-system-auth-delegator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-system-auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: keda-metrics-server + namespace: keda +--- +# Source: keda/templates/metrics-server/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-hpa-controller-external-metrics + helm.sh/chart: keda-2.16.0 + 
app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-hpa-controller-external-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: keda-operator-external-metrics-reader +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +--- +# Source: keda/templates/webhooks/clusterrolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-webhook + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: keda-operator-webhook +subjects: +- kind: ServiceAccount + name: keda-webhook + namespace: keda +--- +# Source: keda/templates/manager/minimal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: keda-operator-certs + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-certs + namespace: keda +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + resourceNames: + - "kedaorg-certs" +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - update +--- +# Source: keda/templates/manager/minimal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-certs + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-certs + namespace: keda +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: keda-operator-certs +subjects: +- kind: ServiceAccount + name: keda-operator + namespace: keda +--- +# Source: keda/templates/metrics-server/clusterrolebinding.yaml +# https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#extension-apiserver-authenticates-the-request +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: keda-operator-auth-reader + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: keda-metrics-server + namespace: keda +--- +# Source: keda/templates/manager/service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + prometheus.io/path: "/metrics" + labels: + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + 
app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator + namespace: keda +spec: + ports: + - name: metricsservice + port: 9666 + targetPort: 9666 + - name: metrics + port: 8080 + targetPort: 8080 + selector: + app: keda-operator +--- +# Source: keda/templates/metrics-server/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: keda-operator-metrics-apiserver + app: keda-operator-metrics-apiserver + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-operator-metrics-apiserver + namespace: keda + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + prometheus.io/path: "/metrics" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + targetPort: 6443 + protocol: TCP + appProtocol: https + - name: metrics + port: 8080 + targetPort: 8080 + protocol: TCP + selector: + app: keda-operator-metrics-apiserver +--- +# Source: keda/templates/webhooks/service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + prometheus.io/path: "/metrics" + labels: + app.kubernetes.io/name: keda-admission-webhooks + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-admission-webhooks + namespace: keda +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: 9443 + appProtocol: https + - name: metrics + port: 8080 + targetPort: 8080 + selector: + app: keda-admission-webhooks +--- +# Source: keda/templates/manager/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-operator + namespace: keda + labels: + app: keda-operator + name: keda-operator + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda +spec: + revisionHistoryLimit: 10 + replicas: 1 + selector: + matchLabels: + app: keda-operator + template: + metadata: + labels: + app: keda-operator + name: keda-operator + app.kubernetes.io/name: keda-operator + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + spec: + serviceAccountName: keda-operator + automountServiceAccountToken: true + securityContext: + runAsNonRoot: true + containers: + - name: keda-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + image: "ghcr.io/kedacore/keda:2.16.0" + command: + - "/keda" + args: + - "--leader-elect" + - "--disable-compression=true" + - "--zap-log-level=info" + - "--zap-encoder=console" + - "--zap-time-encoding=rfc3339" + - "--cert-dir=/certs" + - "--enable-cert-rotation=true" + - "--cert-secret-name=kedaorg-certs" + - "--operator-service-name=keda-operator" + - 
"--metrics-server-service-name=keda-operator-metrics-apiserver" + - "--webhooks-service-name=keda-admission-webhooks" + - "--k8s-cluster-name=kubernetes-default" + - "--k8s-cluster-domain=cluster.local" + - "--enable-prometheus-metrics=true" + - "--metrics-bind-address=:8080" + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 25 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 20 + periodSeconds: 3 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9666 + name: metricsservice + protocol: TCP + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_NAME + value: keda-operator + - name: KEDA_HTTP_DEFAULT_TIMEOUT + value: "3000" + - name: KEDA_HTTP_MIN_TLS_VERSION + value: TLS12 + volumeMounts: + - mountPath: /certs + name: certificates + readOnly: true + resources: + limits: + cpu: 1 + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + volumes: + - name: certificates + secret: + defaultMode: 420 + secretName: kedaorg-certs + optional: true + nodeSelector: + kubernetes.io/os: linux +--- +# Source: keda/templates/metrics-server/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-operator-metrics-apiserver + namespace: keda + labels: + app: keda-operator-metrics-apiserver + app.kubernetes.io/name: keda-operator-metrics-apiserver + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda +spec: + revisionHistoryLimit: 10 + replicas: 1 + selector: + matchLabels: + app: keda-operator-metrics-apiserver + template: + metadata: + labels: + app: keda-operator-metrics-apiserver + app.kubernetes.io/name: keda-operator-metrics-apiserver + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + serviceAccountName: keda-metrics-server + automountServiceAccountToken: true + securityContext: + runAsNonRoot: true + containers: + - name: keda-operator-metrics-apiserver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + image: "ghcr.io/kedacore/keda-metrics-apiserver:2.16.0" + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 6443 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /readyz + port: 6443 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 3 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KEDA_HTTP_DEFAULT_TIMEOUT + value: "3000" + - name: KEDA_HTTP_MIN_TLS_VERSION + value: TLS12 + command: + - /keda-adapter + args: + - --port=8080 + - 
--secure-port=6443 + - --logtostderr=true + - --stderrthreshold=ERROR + - --disable-compression=true + - --metrics-service-address=keda-operator.keda.svc.cluster.local:9666 + - --client-ca-file=/certs/ca.crt + - --tls-cert-file=/certs/tls.crt + - --tls-private-key-file=/certs/tls.key + - --cert-dir=/certs + - --v=0 + ports: + - containerPort: 6443 + name: https + protocol: TCP + - containerPort: 8080 + name: metrics + protocol: TCP + volumeMounts: + - mountPath: /certs + name: certificates + readOnly: true + resources: + limits: + cpu: 1 + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + volumes: + - name: certificates + secret: + defaultMode: 420 + secretName: kedaorg-certs + dnsPolicy: ClusterFirst + hostNetwork: false + nodeSelector: + kubernetes.io/os: linux +--- +# Source: keda/templates/webhooks/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-admission-webhooks + namespace: keda + labels: + app: keda-admission-webhooks + name: keda-admission-webhooks + app.kubernetes.io/name: keda-admission-webhooks + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda +spec: + revisionHistoryLimit: 10 + replicas: 1 + selector: + matchLabels: + app: keda-admission-webhooks + template: + metadata: + labels: + app: keda-admission-webhooks + name: keda-admission-webhooks + app.kubernetes.io/name: keda-admission-webhooks + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + spec: + serviceAccountName: keda-webhook + automountServiceAccountToken: true + securityContext: + runAsNonRoot: true + containers: + - name: keda-admission-webhooks + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + image: "ghcr.io/kedacore/keda-admission-webhooks:2.16.0" + command: + - /keda-admission-webhooks + args: + - "--zap-log-level=info" + - "--zap-encoder=console" + - "--zap-time-encoding=rfc3339" + - "--cert-dir=/certs" + - "--health-probe-bind-address=:8081" + - --metrics-bind-address=:8080 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 25 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 20 + periodSeconds: 3 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ports: + - containerPort: 9443 + name: http + protocol: TCP + - containerPort: 8080 + name: metrics + protocol: TCP + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /certs + name: certificates + readOnly: true + resources: + limits: + cpu: 1 + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + volumes: + - name: certificates + secret: + defaultMode: 420 + secretName: kedaorg-certs + hostNetwork: false + nodeSelector: + kubernetes.io/os: linux +--- +# Source: keda/templates/metrics-server/apiservice.yaml +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + app.kubernetes.io/name: 
v1beta1.external.metrics.k8s.io + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: v1beta1.external.metrics.k8s.io +spec: + service: + name: keda-operator-metrics-apiserver + namespace: keda + port: 443 + group: external.metrics.k8s.io + version: v1beta1 + groupPriorityMinimum: 100 + versionPriority: 100 +--- +# Source: keda/templates/webhooks/validatingconfiguration.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: keda-admission-webhooks + helm.sh/chart: keda-2.16.0 + app.kubernetes.io/component: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: keda-operator + app.kubernetes.io/version: 2.16.0 + app.kubernetes.io/instance: keda + name: keda-admission +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: keda-admission-webhooks + namespace: keda + path: /validate-keda-sh-v1alpha1-scaledobject + failurePolicy: Ignore + matchPolicy: Equivalent + name: vscaledobject.kb.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - keda.sh + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - scaledobjects + sideEffects: None + timeoutSeconds: 10 +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: keda-admission-webhooks + namespace: keda + path: /validate-keda-sh-v1alpha1-triggerauthentication + failurePolicy: Ignore + matchPolicy: Equivalent + name: vstriggerauthentication.kb.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - keda.sh + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - triggerauthentications + sideEffects: None + timeoutSeconds: 10 +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: keda-admission-webhooks + namespace: keda + path: /validate-keda-sh-v1alpha1-clustertriggerauthentication + failurePolicy: Ignore + matchPolicy: Equivalent + name: vsclustertriggerauthentication.kb.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - keda.sh + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - clustertriggerauthentications + sideEffects: None + timeoutSeconds: 10 + diff --git a/keda/tests/test_e2e.py b/keda/tests/test_e2e.py new file mode 100644 index 0000000000000..088d4ae7cda7e --- /dev/null +++ b/keda/tests/test_e2e.py @@ -0,0 +1,12 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import assert_service_checks + + +def test_e2e_openmetrics_v2(dd_agent_check): + aggregator = dd_agent_check() + + aggregator.assert_service_check('keda.openmetrics.health', ServiceCheck.OK, count=1) + assert_service_checks(aggregator) diff --git a/keda/tests/test_unit.py b/keda/tests/test_unit.py new file mode 100644 index 0000000000000..14c6e3e1124ee --- /dev/null +++ b/keda/tests/test_unit.py @@ -0,0 +1,34 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import pytest + +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.keda import KedaCheck + +from .common import TEST_METRICS, get_fixture_path + + +def test_check_mock_keda_openmetrics(dd_run_check, instance, aggregator, mock_http_response): + mock_http_response(file_path=get_fixture_path('keda_metrics.txt')) + check = KedaCheck('keda', {}, [instance]) + dd_run_check(check) + + for metric in TEST_METRICS: + aggregator.assert_metric(metric) + aggregator.assert_metric_has_tag(metric, 'test:tag') + + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + aggregator.assert_service_check('keda.openmetrics.health', ServiceCheck.OK) + + +def test_empty_instance(dd_run_check): + with pytest.raises( + Exception, + match='The setting `openmetrics_endpoint` is required', + ): + check = KedaCheck('keda', {}, [{}]) + dd_run_check(check) diff --git a/milvus/CHANGELOG.md b/milvus/CHANGELOG.md new file mode 100644 index 0000000000000..1b010e7cd74f8 --- /dev/null +++ b/milvus/CHANGELOG.md @@ -0,0 +1,9 @@ +# CHANGELOG - Milvus + + + +## 1.0.0 / 2024-12-26 + +***Added***: + +* Initial Release ([#19055](https://github.com/DataDog/integrations-core/pull/19055)) diff --git a/milvus/README.md b/milvus/README.md new file mode 100644 index 0000000000000..e9278fa0871f9 --- /dev/null +++ b/milvus/README.md @@ -0,0 +1,68 @@ +# Agent Check: Milvus + +## Overview + +This check monitors [Milvus][1] through the Datadog Agent. It provides insights into your Milvus deployment's performance by collecting the latency and number of executions of individual operations. This integration also allows you to monitor the size and resource allocation of your deployment. + +## Setup + +### Installation + +The Milvus check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +#### Host + +Follow the instructions below to configure this check for an Agent running on a host. For containerized environments, see the [Containerized](#containerized) section. + + + + +1. Edit the `milvus.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory, to start collecting your Milvus performance data. See the [sample milvus.d/conf.yaml][4] for all available configuration options. + +2. [Restart the Agent][5]. + + + + +#### Containerized + +For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + + + + +### Validation + +[Run the Agent's status subcommand][6] and look for `milvus` under the Checks section. + +## Data Collected + +### Metrics + +See [metadata.csv][7] for a list of metrics provided by this integration. + +### Events + +The Milvus integration does not include any events. + +### Service Checks + +The Milvus integration does not include any service checks. + +## Troubleshooting + +Need help? Contact [Datadog support][9].
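As a concrete reference for the configuration step described in this README, a minimal `milvus.d/conf.yaml` could look like the sketch below. This is an illustration rather than the shipped sample file: the endpoint mirrors the example value declared in the integration's spec (`http://localhost:9091/metrics`), and the commented `tags` entry is a hypothetical addition, not a required field.

```yaml
init_config:

instances:
    ## Endpoint exposing Milvus' Prometheus metrics; adjust host and port
    ## to match your deployment (9091 is the documented example port).
  - openmetrics_endpoint: http://localhost:9091/metrics

    ## Optional and purely illustrative: extra tags attached to every
    ## metric collected by this instance.
    # tags:
    #   - team:database
```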
+ + +[1]: https://milvus.io/ +[2]: https://app.datadoghq.com/account/settings/agent/latest +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/milvus/datadog_checks/milvus/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/milvus/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/milvus/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/milvus/assets/configuration/spec.yaml b/milvus/assets/configuration/spec.yaml new file mode 100644 index 0000000000000..100b70fcc547f --- /dev/null +++ b/milvus/assets/configuration/spec.yaml @@ -0,0 +1,15 @@ +name: Milvus +files: +- name: milvus.yaml + options: + - template: init_config + options: + - template: init_config/openmetrics + - template: instances + options: + - template: instances/openmetrics + overrides: + openmetrics_endpoint.value.example: http://localhost:9091/metrics + openmetrics_endpoint.description: | + Endpoint exposing Milvus' Prometheus metrics. For more information, refer to + https://milvus.io/docs/monitor.md#Monitor-metrics-with-Prometheus. diff --git a/milvus/assets/service_checks.json b/milvus/assets/service_checks.json new file mode 100644 index 0000000000000..fe51488c7066f --- /dev/null +++ b/milvus/assets/service_checks.json @@ -0,0 +1 @@ +[] diff --git a/milvus/datadog_checks/__init__.py b/milvus/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/milvus/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/milvus/datadog_checks/milvus/__about__.py b/milvus/datadog_checks/milvus/__about__.py new file mode 100644 index 0000000000000..acbfd1c866b84 --- /dev/null +++ b/milvus/datadog_checks/milvus/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '1.0.0' diff --git a/milvus/datadog_checks/milvus/__init__.py b/milvus/datadog_checks/milvus/__init__.py new file mode 100644 index 0000000000000..b0126262a42a2 --- /dev/null +++ b/milvus/datadog_checks/milvus/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import MilvusCheck + +__all__ = ['__version__', 'MilvusCheck'] diff --git a/milvus/datadog_checks/milvus/check.py b/milvus/datadog_checks/milvus/check.py new file mode 100644 index 0000000000000..d1be0f56ca38e --- /dev/null +++ b/milvus/datadog_checks/milvus/check.py @@ -0,0 +1,21 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from datadog_checks.base import OpenMetricsBaseCheckV2 +from datadog_checks.milvus.metrics import METRIC_MAP, RENAME_LABELS_MAP + + +class MilvusCheck(OpenMetricsBaseCheckV2): + __NAMESPACE__ = 'milvus' + + DEFAULT_METRIC_LIMIT = 0 + + def __init__(self, name, init_config, instances=None): + super(MilvusCheck, self).__init__(name, init_config, instances) + + def get_default_config(self): + return { + 'metrics': [METRIC_MAP], + 'rename_labels': RENAME_LABELS_MAP, + } diff --git a/milvus/datadog_checks/milvus/config_models/__init__.py b/milvus/datadog_checks/milvus/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/milvus/datadog_checks/milvus/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/milvus/datadog_checks/milvus/config_models/defaults.py b/milvus/datadog_checks/milvus/config_models/defaults.py new file mode 100644 index 0000000000000..bf7519af75f42 --- /dev/null +++ b/milvus/datadog_checks/milvus/config_models/defaults.py @@ -0,0 +1,132 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. 
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def shared_skip_proxy(): + return False + + +def shared_timeout(): + return 10 + + +def instance_allow_redirects(): + return True + + +def instance_auth_type(): + return 'basic' + + +def instance_cache_metric_wildcards(): + return True + + +def instance_cache_shared_labels(): + return True + + +def instance_collect_counters_with_distributions(): + return False + + +def instance_collect_histogram_buckets(): + return True + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_enable_health_service_check(): + return True + + +def instance_histogram_buckets_as_distributions(): + return False + + +def instance_ignore_connection_errors(): + return False + + +def instance_kerberos_auth(): + return 'disabled' + + +def instance_kerberos_delegate(): + return False + + +def instance_kerberos_force_initiate(): + return False + + +def instance_log_requests(): + return False + + +def instance_min_collection_interval(): + return 15 + + +def instance_non_cumulative_histogram_buckets(): + return False + + +def instance_persist_connections(): + return False + + +def instance_request_size(): + return 16 + + +def instance_skip_proxy(): + return False + + +def instance_tag_by_endpoint(): + return True + + +def instance_telemetry(): + return False + + +def instance_timeout(): + return 10 + + +def instance_tls_ignore_warning(): + return False + + +def instance_tls_use_host_header(): + return False + + +def instance_tls_verify(): + return True + + +def instance_use_latest_spec(): + return False + + +def instance_use_legacy_auth_encoding(): + return True + + +def instance_use_process_start_time(): + return False diff --git a/milvus/datadog_checks/milvus/config_models/instance.py b/milvus/datadog_checks/milvus/config_models/instance.py new file mode 100644 index 0000000000000..8e39a0e921719 --- /dev/null +++ b/milvus/datadog_checks/milvus/config_models/instance.py @@ -0,0 +1,171 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from types import MappingProxyType +from typing import Any, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import defaults, validators + + +class AuthToken(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + reader: Optional[MappingProxyType[str, Any]] = None + writer: Optional[MappingProxyType[str, Any]] = None + + +class ExtraMetrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class Metrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class Proxy(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + http: Optional[str] = None + https: Optional[str] = None + no_proxy: Optional[tuple[str, ...]] = None + + +class ShareLabels(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + labels: Optional[tuple[str, ...]] = None + match: Optional[tuple[str, ...]] = None + + +class InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + allow_redirects: Optional[bool] = None + auth_token: Optional[AuthToken] = None + auth_type: Optional[str] = None + aws_host: Optional[str] = None + aws_region: Optional[str] = None + aws_service: Optional[str] = None + cache_metric_wildcards: Optional[bool] = None + cache_shared_labels: Optional[bool] = None + collect_counters_with_distributions: Optional[bool] = None + collect_histogram_buckets: Optional[bool] = None + connect_timeout: Optional[float] = None + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + enable_health_service_check: Optional[bool] = None + exclude_labels: Optional[tuple[str, ...]] = None + exclude_metrics: Optional[tuple[str, ...]] = None + exclude_metrics_by_labels: Optional[MappingProxyType[str, Union[bool, tuple[str, ...]]]] = None + extra_headers: Optional[MappingProxyType[str, Any]] = None + extra_metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, ExtraMetrics]]], ...]] = None + headers: Optional[MappingProxyType[str, Any]] = None + histogram_buckets_as_distributions: Optional[bool] = None + hostname_format: Optional[str] = None + hostname_label: Optional[str] = None + ignore_connection_errors: Optional[bool] = None + ignore_tags: Optional[tuple[str, ...]] = None + include_labels: Optional[tuple[str, ...]] = None + kerberos_auth: Optional[str] = None + kerberos_cache: Optional[str] = None + kerberos_delegate: Optional[bool] = None + kerberos_force_initiate: Optional[bool] = None + kerberos_hostname: Optional[str] = None + kerberos_keytab: Optional[str] = None + kerberos_principal: Optional[str] = None + log_requests: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, Metrics]]], ...]] = None + min_collection_interval: Optional[float] = None + namespace: Optional[str] = Field(None, pattern='\\w*') + non_cumulative_histogram_buckets: Optional[bool] = None + ntlm_domain: Optional[str] = None + openmetrics_endpoint: str + password: Optional[str] = None + persist_connections: Optional[bool] = None + proxy: Optional[Proxy] = None + raw_line_filters: 
Optional[tuple[str, ...]] = None + raw_metric_prefix: Optional[str] = None + read_timeout: Optional[float] = None + rename_labels: Optional[MappingProxyType[str, Any]] = None + request_size: Optional[float] = None + service: Optional[str] = None + share_labels: Optional[MappingProxyType[str, Union[bool, ShareLabels]]] = None + skip_proxy: Optional[bool] = None + tag_by_endpoint: Optional[bool] = None + tags: Optional[tuple[str, ...]] = None + telemetry: Optional[bool] = None + timeout: Optional[float] = None + tls_ca_cert: Optional[str] = None + tls_cert: Optional[str] = None + tls_ignore_warning: Optional[bool] = None + tls_private_key: Optional[str] = None + tls_protocols_allowed: Optional[tuple[str, ...]] = None + tls_use_host_header: Optional[bool] = None + tls_verify: Optional[bool] = None + use_latest_spec: Optional[bool] = None + use_legacy_auth_encoding: Optional[bool] = None + use_process_start_time: Optional[bool] = None + username: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/milvus/datadog_checks/milvus/config_models/shared.py b/milvus/datadog_checks/milvus/config_models/shared.py new file mode 100644 index 0000000000000..0e8a9ecab10a2 --- /dev/null +++ b/milvus/datadog_checks/milvus/config_models/shared.py @@ -0,0 +1,60 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import defaults, validators
+
+
+class Proxy(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    http: Optional[str] = None
+    https: Optional[str] = None
+    no_proxy: Optional[tuple[str, ...]] = None
+
+
+class SharedConfig(BaseModel):
+    model_config = ConfigDict(
+        validate_default=True,
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    proxy: Optional[Proxy] = None
+    service: Optional[str] = None
+    skip_proxy: Optional[bool] = None
+    timeout: Optional[float] = None
+
+    @model_validator(mode='before')
+    def _initial_validation(cls, values):
+        return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values))
+
+    @field_validator('*', mode='before')
+    def _validate(cls, value, info):
+        field = cls.model_fields[info.field_name]
+        field_name = field.alias or info.field_name
+        if field_name in info.context['configured_fields']:
+            value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field)
+        else:
+            value = getattr(defaults, f'shared_{info.field_name}', lambda: value)()
+
+        return validation.utils.make_immutable(value)
+
+    @model_validator(mode='after')
+    def _final_validation(cls, model):
+        return validation.core.check_model(getattr(validators, 'check_shared', identity)(model))
diff --git a/milvus/datadog_checks/milvus/config_models/validators.py b/milvus/datadog_checks/milvus/config_models/validators.py
new file mode 100644
index 0000000000000..1a91a82658202
--- /dev/null
+++ b/milvus/datadog_checks/milvus/config_models/validators.py
@@ -0,0 +1,40 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from urllib.parse import urlparse
+
+
+def initialize_instance(values, **kwargs):
+    if 'openmetrics_endpoint' in values:
+        validate_url(
+            values['openmetrics_endpoint'],
+            required_path='/metrics',
+            example='http://localhost:9091/metrics',
+            config='openmetrics_endpoint',
+        )
+
+    return values
+
+
+def validate_url(url, required_path=None, example=None, config=None):
+    # Validates the endpoint to ensure that the components are present.
+    # For OpenMetrics: Scheme, netloc and /metrics path
+    # For API: Scheme, netloc and no path
+    # Raises a ValueError listing the detected problems along with a valid example
+    url_parsed = urlparse(url)
+    errors = []
+
+    if not url_parsed.netloc:
+        errors.append("couldn't properly parse endpoint")
+    if not url_parsed.scheme:
+        errors.append("http or https scheme is missing")
+    if required_path and url_parsed.path != required_path:
+        errors.append(f"URL should end with {required_path}")
+    if not required_path and url_parsed.path:
+        errors.append("should not contain a path or trailing /")
+
+    if errors:
+        error_message = ", ".join(errors)
+        raise ValueError(
+            f"{config}: {url} is incorrectly configured. Errors detected: {error_message}. Example: {example}"
+        )
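+
+# A minimal sketch of how the validation above behaves, with hypothetical inputs:
+#
+#   validate_url('http://localhost:9091/metrics', required_path='/metrics',
+#                example='http://localhost:9091/metrics', config='openmetrics_endpoint')
+#   # all components present: returns None
+#
+#   validate_url('localhost/metrics', required_path='/metrics',
+#                example='http://localhost:9091/metrics', config='openmetrics_endpoint')
+#   # missing scheme and netloc (and a non-'/metrics' path): raises ValueError
+#   # listing every detected problem alongside the example endpoint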
diff --git a/milvus/datadog_checks/milvus/data/conf.yaml.example b/milvus/datadog_checks/milvus/data/conf.yaml.example
new file mode 100644
index 0000000000000..b275903aba352
--- /dev/null
+++ b/milvus/datadog_checks/milvus/data/conf.yaml.example
@@ -0,0 +1,626 @@
+## All options defined here are available to all instances.
+#
+init_config:
+
+    ## @param proxy - mapping - optional
+    ## Set HTTP or HTTPS proxies for all instances. Use the `no_proxy` list
+    ## to specify hosts that must bypass proxies.
+    ##
+    ## The SOCKS protocol is also supported like so:
+    ##
+    ##     socks5://user:pass@host:port
+    ##
+    ## Using the scheme `socks5` causes the DNS resolution to happen on the
+    ## client, rather than on the proxy server. This is in line with `curl`,
+    ## which uses the scheme to decide whether to do the DNS resolution on
+    ## the client or proxy. If you want to resolve the domains on the proxy
+    ## server, use `socks5h` as the scheme.
+    #
+    # proxy:
+    #   http: http://<PROXY_SERVER_FOR_HTTP>:<PORT>
+    #   https: https://<PROXY_SERVER_FOR_HTTPS>:<PORT>
+    #   no_proxy:
+    #   - <HOSTNAME_1>
+    #   - <HOSTNAME_2>
+
+    ## @param skip_proxy - boolean - optional - default: false
+    ## If set to `true`, this makes the check bypass any proxy
+    ## settings enabled and attempt to reach services directly.
+    #
+    # skip_proxy: false
+
+    ## @param timeout - number - optional - default: 10
+    ## The timeout for connecting to services.
+    #
+    # timeout: 10
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Additionally, this sets the default `service` for every log source.
+    #
+    # service: <SERVICE>
+
+## Every instance is scheduled independently of the others.
+#
+instances:
+
+    ## @param openmetrics_endpoint - string - required
+    ## Endpoint exposing Milvus' Prometheus metrics. For more information, refer to
+    ## https://milvus.io/docs/monitor.md#Monitor-metrics-with-Prometheus.
+    #
+  - openmetrics_endpoint: http://localhost:9091/metrics
+
+    ## @param raw_metric_prefix - string - optional
+    ## A prefix that is removed from all exposed metric names, if present.
+    ## All configuration options will use the prefix-less name.
+    #
+    # raw_metric_prefix: <PREFIX>_
+
+    ## @param extra_metrics - (list of string or mapping) - optional
+    ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to
+    ## what the check collects by default. If the check already collects a metric, then
+    ## metric definitions here take precedence. Metrics may be defined in 3 ways:
+    ##
+    ## 1. If the item is a string, then it represents the exposed metric name, and
+    ##    the sent metric name will be identical. For example:
+    ##
+    ##      extra_metrics:
+    ##      - <METRIC_1>
+    ##      - <METRIC_2>
+    ## 2. If the item is a mapping, then the keys represent the exposed metric names.
+    ##
+    ##      a. If a value is a string, then it represents the sent metric name. For example:
+    ##
+    ##        extra_metrics:
+    ##        - <EXPOSED_METRIC_1>: <SENT_METRIC_1>
+    ##        - <EXPOSED_METRIC_2>: <SENT_METRIC_2>
+    ##      b. If a value is a mapping, then it must have a `name` and/or `type` key.
+    ##         The `name` represents the sent metric name, and the `type` represents how
+    ##         the metric should be handled, overriding any type information the endpoint
+    ##         may provide. For example:
+    ##
+    ##        extra_metrics:
+    ##        - <EXPOSED_METRIC_1>:
+    ##            name: <SENT_METRIC_1>
+    ##            type: <METRIC_TYPE_1>
+    ##        - <EXPOSED_METRIC_2>:
+    ##            name: <SENT_METRIC_2>
+    ##            type: <METRIC_TYPE_2>
+    ##
+    ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`.
+    ##
+    ## Note: To collect counter metrics with names ending in `_total`, specify the metric name without the `_total`
+    ## suffix. For example, to collect the counter metric `promhttp_metric_handler_requests_total`, specify
+    ## `promhttp_metric_handler_requests`. This submits to Datadog the metric name appended with `.count`.
+    ## For more information, see:
+    ## https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#suffixes
+    ##
+    ## Regular expressions may be used to match the exposed metric names, for example:
+    ##
+    ##      extra_metrics:
+    ##      - ^network_(ingress|egress)_.+
+    ##      - .+:
+    ##          type: gauge
+    #
+    # extra_metrics: []
+
+    ## @param exclude_metrics - list of strings - optional
+    ## A list of metrics to exclude, with each entry being either
+    ## the exact metric name or a regular expression.
+    ## In order to exclude all metrics but the ones matching a specific filter,
+    ## you can use a negative lookahead regex like:
+    ##   - ^(?!foo).*$
+    #
+    # exclude_metrics: []
+
+    ## @param exclude_metrics_by_labels - mapping - optional
+    ## A mapping of labels to exclude metrics with matching label name and their corresponding metric values. To match
+    ## all values of a label, set it to `true`.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    ##
+    ## For example, the following configuration instructs the check to exclude all metrics with
+    ## a label `worker` or a label `pid` with the value of either `23` or `42`.
+    ##
+    ##   exclude_metrics_by_labels:
+    ##     worker: true
+    ##     pid:
+    ##     - '23'
+    ##     - '42'
+    #
+    # exclude_metrics_by_labels: {}
+
+    ## @param exclude_labels - list of strings - optional
+    ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs.
+    ## May be used in conjunction with `include_labels`.
+    ## Labels defined in `exclude_labels` will take precedence in case of overlap.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    #
+    # exclude_labels: []
+
+    ## @param include_labels - list of strings - optional
+    ## A list of labels to include. May be used in conjunction with `exclude_labels`.
+    ## Labels defined in `exclude_labels` will take precedence in case of overlap.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    #
+    # include_labels: []
+
+    ## @param rename_labels - mapping - optional
+    ## A mapping of label names to their new names.
+    #
+    # rename_labels:
+    #   <LABEL_NAME_1>: <NEW_LABEL_NAME_1>
+    #   <LABEL_NAME_2>: <NEW_LABEL_NAME_2>
+
+    ## @param enable_health_service_check - boolean - optional - default: true
+    ## Whether or not to send a service check named `<NAMESPACE>.openmetrics.health` which reports
+    ## the health of the `openmetrics_endpoint`.
+    #
+    # enable_health_service_check: true
+
+    ## @param ignore_connection_errors - boolean - optional - default: false
+    ## Whether or not to ignore connection errors when scraping `openmetrics_endpoint`.
+    #
+    # ignore_connection_errors: false
+
+    ## @param hostname_label - string - optional
+    ## Override the hostname for every metric submission with the value of one of its labels.
+    #
+    # hostname_label: <HOSTNAME_LABEL>
+
+    ## @param hostname_format - string - optional
+    ## When `hostname_label` is set, this instructs the check how to format the values. The string
+    ## `<HOSTNAME>` is replaced by the value of the label defined by `hostname_label`.
+    #
+    # hostname_format: <HOSTNAME>
+
+    ## @param collect_histogram_buckets - boolean - optional - default: true
+    ## Whether or not to send histogram buckets.
+    #
+    # collect_histogram_buckets: true
+
+    ## @param non_cumulative_histogram_buckets - boolean - optional - default: false
+    ## Whether or not histogram buckets are non-cumulative and come with a `lower_bound` tag.
+    #
+    # non_cumulative_histogram_buckets: false
+
+    ## @param histogram_buckets_as_distributions - boolean - optional - default: false
+    ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly
+    ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options.
+    ##
+    ## Learn more about distribution metrics:
+    ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types
+    #
+    # histogram_buckets_as_distributions: false
+
+    ## @param collect_counters_with_distributions - boolean - optional - default: false
+    ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count`
+    ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the
+    ## `histogram_buckets_as_distributions` option.
+    #
+    # collect_counters_with_distributions: false
+
+    ## @param use_process_start_time - boolean - optional - default: false
+    ## Whether to enable a heuristic for reporting counter values on the first scrape. When true,
+    ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero
+    ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire
+    ## response is received.
+    #
+    # use_process_start_time: false
+
+    ## @param share_labels - mapping - optional
+    ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the
+    ## exposed metrics from which to share labels, and the values are mappings that configure the
+    ## sharing behavior. Each mapping must have at least one of the following keys:
+    ##
+    ##   labels - This is a list of labels to share. All labels are shared if this is not set.
+    ##   match - This is a list of labels to match on other metrics as a condition for sharing.
+    ##   values - This is a list of allowed values as a condition for sharing.
+    ##
+    ## To unconditionally share all labels of a metric, set it to `true`.
+    ##
+    ## For example, the following configuration instructs the check to apply all labels from `metric_a`
+    ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod`
+    ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c`
+    ## to all other metrics if their value is equal to `23` or `42`.
+    ##
+    ##   share_labels:
+    ##     metric_a: true
+    ##     metric_b:
+    ##       labels:
+    ##       - node
+    ##       match:
+    ##       - pod
+    ##     metric_c:
+    ##       values:
+    ##       - 23
+    ##       - 42
+    #
+    # share_labels: {}
+
+    ## @param cache_shared_labels - boolean - optional - default: true
+    ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload
+    ## for improved performance.
+    ##
+    ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage.
+    #
+    # cache_shared_labels: true
+
+    ## @param raw_line_filters - list of strings - optional
+    ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint`
+    ## from being parsed.
+    #
+    # raw_line_filters: []
+
+    ## @param cache_metric_wildcards - boolean - optional - default: true
+    ## Whether or not to cache data from metrics that are defined by regular expressions rather
+    ## than the full metric name.
+    #
+    # cache_metric_wildcards: true
+
+    ## @param telemetry - boolean - optional - default: false
+    ## Whether or not to submit metrics prefixed by `<NAMESPACE>.telemetry.` for debugging purposes.
+    #
+    # telemetry: false
+
+    ## @param ignore_tags - list of strings - optional
+    ## A list of regular expressions used to ignore tags added by Autodiscovery and entries in the `tags` option.
+    #
+    # ignore_tags:
+    #   - <FULL:TAG>
+    #   - <TAG_PREFIX_.*>
+    #   - <.*_TAG_SUFFIX>
+
+    ## @param proxy - mapping - optional
+    ## This overrides the `proxy` setting in `init_config`.
+    ##
+    ## Set HTTP or HTTPS proxies for this instance. Use the `no_proxy` list
+    ## to specify hosts that must bypass proxies.
+    ##
+    ## The SOCKS protocol is also supported, for example:
+    ##
+    ##     socks5://user:pass@host:port
+    ##
+    ## Using the scheme `socks5` causes the DNS resolution to happen on the
+    ## client, rather than on the proxy server. This is in line with `curl`,
+    ## which uses the scheme to decide whether to do the DNS resolution on
+    ## the client or proxy. If you want to resolve the domains on the proxy
+    ## server, use `socks5h` as the scheme.
+    #
+    # proxy:
+    #   http: http://<PROXY_SERVER_FOR_HTTP>:<PORT>
+    #   https: https://<PROXY_SERVER_FOR_HTTPS>:<PORT>
+    #   no_proxy:
+    #   - <HOSTNAME_1>
+    #   - <HOSTNAME_2>
+
+    ## @param skip_proxy - boolean - optional - default: false
+    ## This overrides the `skip_proxy` setting in `init_config`.
+    ##
+    ## If set to `true`, this makes the check bypass any proxy
+    ## settings enabled and attempt to reach services directly.
+    #
+    # skip_proxy: false
+
+    ## @param auth_type - string - optional - default: basic
+    ## The type of authentication to use. The available types (and related options) are:
+    ##
+    ##   - basic
+    ##     |__ username
+    ##     |__ password
+    ##     |__ use_legacy_auth_encoding
+    ##   - digest
+    ##     |__ username
+    ##     |__ password
+    ##   - ntlm
+    ##     |__ ntlm_domain
+    ##     |__ password
+    ##   - kerberos
+    ##     |__ kerberos_auth
+    ##     |__ kerberos_cache
+    ##     |__ kerberos_delegate
+    ##     |__ kerberos_force_initiate
+    ##     |__ kerberos_hostname
+    ##     |__ kerberos_keytab
+    ##     |__ kerberos_principal
+    ##   - aws
+    ##     |__ aws_region
+    ##     |__ aws_host
+    ##     |__ aws_service
+    ##
+    ## The `aws` auth type relies on boto3 to automatically gather AWS credentials, for example: from `.aws/credentials`.
+    ## Details: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials
+    #
+    # auth_type: basic
+
+    ## @param use_legacy_auth_encoding - boolean - optional - default: true
+    ## When `auth_type` is set to `basic`, this determines whether to encode as `latin1` rather than `utf-8`.
+    #
+    # use_legacy_auth_encoding: true
+
+    ## @param username - string - optional
+    ## The username to use if services are behind basic or digest auth.
+    #
+    # username: <USERNAME>
+
+    ## @param password - string - optional
+    ## The password to use if services are behind basic or NTLM auth.
+    #
+    # password: <PASSWORD>
+
+    ## @param ntlm_domain - string - optional
+    ## If your services use NTLM authentication, specify
+    ## the domain used in the check. For NTLM Auth, append
+    ## the username to domain, not as the `username` parameter.
+    #
+    # ntlm_domain: <NTLM_DOMAIN>\<USERNAME>
+
+    ## @param kerberos_auth - string - optional - default: disabled
+    ## If your services use Kerberos authentication, you can specify the Kerberos
+    ## strategy to use between:
+    ##
+    ##   - required
+    ##   - optional
+    ##   - disabled
+    ##
+    ## See https://github.com/requests/requests-kerberos#mutual-authentication
+    #
+    # kerberos_auth: disabled
+
+    ## @param kerberos_cache - string - optional
+    ## Sets the KRB5CCNAME environment variable.
+    ## It should point to a credential cache with a valid TGT.
+    #
+    # kerberos_cache: <KRB5CCNAME>
+
+    ## @param kerberos_delegate - boolean - optional - default: false
+    ## Set to `true` to enable Kerberos delegation of credentials to a server that requests delegation.
+    ##
+    ## See https://github.com/requests/requests-kerberos#delegation
+    #
+    # kerberos_delegate: false
+
+    ## @param kerberos_force_initiate - boolean - optional - default: false
+    ## Set to `true` to preemptively initiate the Kerberos GSS exchange and
+    ## present a Kerberos ticket on the initial request (and all subsequent).
+    ##
+    ## See https://github.com/requests/requests-kerberos#preemptive-authentication
+    #
+    # kerberos_force_initiate: false
+
+    ## @param kerberos_hostname - string - optional
+    ## Override the hostname used for the Kerberos GSS exchange if its DNS name doesn't
+    ## match its Kerberos hostname, for example: behind a content switch or load balancer.
+    ##
+    ## See https://github.com/requests/requests-kerberos#hostname-override
+    #
+    # kerberos_hostname: <KERBEROS_HOSTNAME>
+
+    ## @param kerberos_principal - string - optional
+    ## Set an explicit principal, to force Kerberos to look for a
+    ## matching credential cache for the named user.
+    ##
+    ## See https://github.com/requests/requests-kerberos#explicit-principal
+    #
+    # kerberos_principal: <KERBEROS_PRINCIPAL>
+
+    ## @param kerberos_keytab - string - optional
+    ## Set the path to your Kerberos key tab file.
+    #
+    # kerberos_keytab: <KEYTAB_FILE_PATH>
+
+    ## @param auth_token - mapping - optional
+    ## This allows for the use of authentication information from dynamic sources.
+    ## Both a reader and writer must be configured.
+    ##
+    ## The available readers are:
+    ##
+    ##   - type: file
+    ##     path (required): The absolute path for the file to read from.
+    ##     pattern: A regular expression pattern with a single capture group used to find the
+    ##              token rather than using the entire file, for example: Your secret is (.+)
+    ##   - type: oauth
+    ##     url (required): The token endpoint.
+    ##     client_id (required): The client identifier.
+    ##     client_secret (required): The client secret.
+    ##     basic_auth: Whether the provider expects credentials to be transmitted in
+    ##                 an HTTP Basic Auth header. The default is: false
+    ##     options: Mapping of additional options to pass to the provider, such as the audience
+    ##              or the scope. For example:
+    ##                 options:
+    ##                   audience: https://example.com
+    ##                   scope: read:example
+    ##
+    ## The available writers are:
+    ##
+    ##   - type: header
+    ##     name (required): The name of the field, for example: Authorization
+    ##     value: The template value, for example `Bearer <TOKEN>`. The default is: <TOKEN>
+    ##     placeholder: The substring in `value` to replace with the token, defaults to: <TOKEN>
+    #
+    # auth_token:
+    #   reader:
+    #     type: <READER_TYPE>
+    #     <OPTION_1>: <VALUE_1>
+    #     <OPTION_2>: <VALUE_2>
+    #   writer:
+    #     type: <WRITER_TYPE>
+    #     <OPTION_1>: <VALUE_1>
+    #     <OPTION_2>: <VALUE_2>
+
+    ## @param aws_region - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the region.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_region: <AWS_REGION>
+
+    ## @param aws_host - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the host.
+    ## This only needs the hostname and does not require the protocol (HTTP, HTTPS, and more).
+    ## For example, if connecting to https://us-east-1.amazonaws.com/, set `aws_host` to `us-east-1.amazonaws.com`.
+    ##
+    ## Note: This setting is not necessary for official integrations.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_host: <AWS_HOST>
+
+    ## @param aws_service - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the service code. For a list
+    ## of available service codes, see https://docs.aws.amazon.com/general/latest/gr/rande.html
+    ##
+    ## Note: This setting is not necessary for official integrations.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_service: <AWS_SERVICE>
+
+    ## @param tls_verify - boolean - optional - default: true
+    ## Instructs the check to validate the TLS certificate of services.
+    #
+    # tls_verify: true
+
+    ## @param tls_use_host_header - boolean - optional - default: false
+    ## If a `Host` header is set, this enables its use for SNI (matching against the TLS certificate CN or SAN).
+    #
+    # tls_use_host_header: false
+
+    ## @param tls_ignore_warning - boolean - optional - default: false
+    ## If `tls_verify` is disabled, security warnings are logged by the check.
+    ## Disable those by setting `tls_ignore_warning` to true.
+    #
+    # tls_ignore_warning: false
+
+    ## @param tls_cert - string - optional
+    ## The path to a single file in PEM format containing a certificate as well as any
+    ## number of CA certificates needed to establish the certificate's authenticity for
+    ## use when connecting to services. It may also contain an unencrypted private key to use.
+    #
+    # tls_cert: <CERT_PATH>
+
+    ## @param tls_private_key - string - optional
+    ## The unencrypted private key to use for `tls_cert` when connecting to services. This is
+    ## required if `tls_cert` is set and it does not already contain a private key.
+    #
+    # tls_private_key: <PRIVATE_KEY_PATH>
+
+    ## @param tls_ca_cert - string - optional
+    ## The path to a file of concatenated CA certificates in PEM format or a directory
+    ## containing several CA certificates in PEM format. If a directory, the directory
+    ## must have been processed using the `openssl rehash` command. See:
+    ## https://www.openssl.org/docs/man3.2/man1/c_rehash.html
+    #
+    # tls_ca_cert: <CA_CERT_PATH>
+
+    ## @param tls_protocols_allowed - list of strings - optional
+    ## The expected versions of TLS/SSL when fetching intermediate certificates.
+    ## Only `SSLv3`, `TLSv1.2`, `TLSv1.3` are allowed by default. The possible values are:
+    ##   SSLv3
+    ##   TLSv1
+    ##   TLSv1.1
+    ##   TLSv1.2
+    ##   TLSv1.3
+    #
+    # tls_protocols_allowed:
+    #   - SSLv3
+    #   - TLSv1.2
+    #   - TLSv1.3
+
+    ## @param headers - mapping - optional
+    ## The headers parameter allows you to send specific headers with every request.
+    ## You can use it for explicitly specifying the host header or adding headers for
+    ## authorization purposes.
+    ##
+    ## This overrides any default headers.
+    #
+    # headers:
+    #   Host: <ALTERNATIVE_HOSTNAME>
+    #   X-Auth-Token: <AUTH_TOKEN>
+
+    ## @param extra_headers - mapping - optional
+    ## Additional headers to send with every request.
+    #
+    # extra_headers:
+    #   Host: <ALTERNATIVE_HOSTNAME>
+    #   X-Auth-Token: <AUTH_TOKEN>
+
+    ## @param timeout - number - optional - default: 10
+    ## The timeout for accessing services.
+    ##
+    ## This overrides the `timeout` setting in `init_config`.
+    #
+    # timeout: 10
+
+    ## @param connect_timeout - number - optional
+    ## The connect timeout for accessing services. Defaults to `timeout`.
+    #
+    # connect_timeout: <CONNECT_TIMEOUT>
+
+    ## @param read_timeout - number - optional
+    ## The read timeout for accessing services. Defaults to `timeout`.
+    #
+    # read_timeout: <READ_TIMEOUT>
+
+    ## @param request_size - number - optional - default: 16
+    ## The number of kibibytes (KiB) to read from streaming HTTP responses at a time.
+    #
+    # request_size: 16
+
+    ## @param log_requests - boolean - optional - default: false
+    ## Whether or not to debug log the HTTP(S) requests made, including the method and URL.
+    #
+    # log_requests: false
+
+    ## @param persist_connections - boolean - optional - default: false
+    ## Whether or not to persist cookies and use connection pooling for improved performance.
+    #
+    # persist_connections: false
+
+    ## @param allow_redirects - boolean - optional - default: true
+    ## Whether or not to allow URL redirection.
+    #
+    # allow_redirects: true
+
+    ## @param tags - list of strings - optional
+    ## A list of tags to attach to every metric and service check emitted by this instance.
+    ##
+    ## Learn more about tagging at https://docs.datadoghq.com/tagging
+    #
+    # tags:
+    #   - <KEY_1>:<VALUE_1>
+    #   - <KEY_2>:<VALUE_2>
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Overrides any `service` defined in the `init_config` section.
+    #
+    # service: <SERVICE>
+
+    ## @param min_collection_interval - number - optional - default: 15
+    ## This changes the collection interval of the check. For more information, see:
+    ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval
+    #
+    # min_collection_interval: 15
+
+    ## @param empty_default_hostname - boolean - optional - default: false
+    ## This forces the check to send metrics with no hostname.
+    ##
+    ## This is useful for cluster-level checks.
+    #
+    # empty_default_hostname: false
+
+    ## @param metric_patterns - mapping - optional
+    ## A mapping of metrics to include or exclude, with each entry being a regular expression.
+    ##
+    ## Metrics defined in `exclude` will take precedence in case of overlap.
+    #
+    # metric_patterns:
+    #   include:
+    #   - <INCLUDE_REGEX>
+    #   exclude:
+    #   - <EXCLUDE_REGEX>
diff --git a/milvus/datadog_checks/milvus/metrics.py b/milvus/datadog_checks/milvus/metrics.py
new file mode 100644
index 0000000000000..bd6a7ff744e59
--- /dev/null
+++ b/milvus/datadog_checks/milvus/metrics.py
@@ -0,0 +1,231 @@
+# (C) Datadog, Inc.
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +METRIC_MAP = { + 'milvus_build_info': {'type': 'metadata', 'label': 'version', 'name': 'version'}, + 'milvus_cgo_active_future_total': 'cgo.active_future_total', + 'milvus_cgo_cgo_duration_seconds': 'cgo.cgo_duration_seconds', + 'milvus_cgo_cgo_queue_duration_seconds': 'cgo.cgo_queue_duration_seconds', + 'milvus_cgo_running_cgo_call_total': 'cgo.running_cgo_call_total', + 'milvus_datacoord_channel_checkpoint_unix_seconds': { + 'name': 'datacoord.time_since_channel_checkpoint', + 'type': 'time_elapsed', + }, + 'milvus_datacoord_collection_num': 'datacoord.collection_num', + 'milvus_datacoord_consume_datanode_tt_lag_ms': 'datacoord.consume_datanode_tt_lag_ms', + 'milvus_datacoord_datanode_num': 'datacoord.datanode_num', + 'milvus_datacoord_import_tasks': 'datacoord.import_tasks', + 'milvus_datacoord_index_node_num': 'datacoord.index.node_num', + 'milvus_datacoord_index_req_count': 'datacoord.index.req', + 'milvus_datacoord_index_task_count': 'datacoord.index.task', + 'milvus_datacoord_segment_num': 'datacoord.segment_num', + 'milvus_datacoord_stored_binlog_size': 'datacoord.stored.binlog_size', + 'milvus_datacoord_stored_index_files_size': 'datacoord.stored.index_files_size', + 'milvus_datacoord_stored_rows_num': 'datacoord.stored.rows_num', + 'milvus_datacoord_task_execute_max_latency': 'datacoord.task_execute_max_latency', + 'milvus_datacoord_watched_dml_chanel_num': 'datacoord.watched_dml_chanel_num', + 'milvus_datanode_autoflush_buffer_op_count': 'datanode.autoflush_buffer_op', + 'milvus_datanode_consume_bytes_count': 'datanode.consume.bytes', + 'milvus_datanode_consume_msg_count': 'datanode.consume.msg', + 'milvus_datanode_consume_tt_lag_ms': 'datanode.consume.tt_lag_ms', + 'milvus_datanode_encode_buffer_latency': 'datanode.encode_buffer_latency', + 'milvus_datanode_flowgraph_num': 'datanode.flowgraph_num', + 'milvus_datanode_flush_buffer_op_count': 'datanode.flush.buffer_op', + 'milvus_datanode_flush_req_count': 'datanode.flush.req', + 'milvus_datanode_flushed_data_rows': 'datanode.flushed_data.rows', + 'milvus_datanode_flushed_data_size': 'datanode.flushed_data.size', + 'milvus_datanode_msg_dispatcher_tt_lag_ms': 'datanode.msg.dispatcher_tt_lag_ms', + 'milvus_datanode_msg_rows_count': 'datanode.msg.rows', + 'milvus_datanode_save_latency': 'datanode.save_latency', + 'milvus_flushed_segment_file_num': 'flushed_segment_file_num', + 'milvus_indexnode_build_index_latency': 'indexnode.build_index_latency', + 'milvus_indexnode_encode_index_latency': 'indexnode.encode_index_latency', + 'milvus_indexnode_index_task_count': 'indexnode.index.task', + 'milvus_indexnode_index_task_latency_in_queue': 'indexnode.index.task_latency_in_queue', + 'milvus_indexnode_knowhere_build_index_latency': 'indexnode.knowhere_build_index_latency', + 'milvus_indexnode_save_index_latency': 'indexnode.save_index_latency', + 'milvus_meta_kv_size': 'meta.kv_size', + 'milvus_meta_op_count': 'meta.op', + 'milvus_meta_request_latency': 'meta.request_latency', + 'milvus_msg_queue_consumer_num': 'msg_queue_consumer_num', + 'milvus_msgstream_op_count': 'msgstream.op', + 'milvus_msgstream_request_latency': 'msgstream.request_latency', + 'milvus_num_node': 'num_node', + 'milvus_proxy_apply_pk_latency': 'proxy.apply.pk_latency', + 'milvus_proxy_apply_timestamp_latency': 'proxy.apply.timestamp_latency', + 'milvus_proxy_assign_segmentID_latency': 'proxy.assign_segmentID_latency', + 'milvus_proxy_cache_hit_count': 'proxy.cache.hit', 
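+    # The mapping values here follow the base OpenMetrics check's conventions:
+    # a plain string renames the exposed metric before submission, while a mapping
+    # may also override how it is handled. For example, the 'time_elapsed' entries
+    # submit the time elapsed since the scraped timestamp value, and the 'metadata'
+    # entry above feeds the reported Milvus version. The 'milvus.' namespace prefix
+    # and type-based suffixes such as '.count' or '.bucket' are appended
+    # automatically at submission time.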
+ 'milvus_proxy_cache_update_latency': 'proxy.cache.update_latency', + 'milvus_proxy_delete_vectors_count': 'proxy.delete_vectors', + 'milvus_proxy_msgstream_obj_num': 'proxy.msgstream_obj_num', + 'milvus_proxy_mutation_send_latency': 'proxy.mutation_send_latency', + 'milvus_proxy_rate_limit_req_count': 'proxy.rate_limit_req', + 'milvus_proxy_report_value': 'proxy.report_value', + 'milvus_proxy_req_count': 'proxy.req', + 'milvus_proxy_req_in_queue_latency': 'proxy.req.in_queue_latency', + 'milvus_proxy_req_latency': 'proxy.req.latency', + 'milvus_proxy_send_bytes_count': 'proxy.send_bytes', + 'milvus_proxy_sq_decode_result_latency': 'proxy.sq.decode_result_latency', + 'milvus_proxy_sq_reduce_result_latency': 'proxy.sq.reduce_result_latency', + 'milvus_proxy_sq_wait_result_latency': 'proxy.sq.wait_result_latency', + 'milvus_proxy_sync_segment_request_length': 'proxy.sync_segment_request_length', + 'milvus_proxy_tt_lag_ms': 'proxy.tt_lag_ms', + 'milvus_querycoord_collection_num': 'querycoord.collection_num', + 'milvus_querycoord_current_target_checkpoint_unix_seconds': { + 'name': 'querycoord.current_target_checkpoint_unix_seconds', + 'type': 'time_elapsed', + }, + 'milvus_querycoord_load_latency': 'querycoord.load.latency', + 'milvus_querycoord_load_req_count': 'querycoord.load.req', + 'milvus_querycoord_partition_num': 'querycoord.partition_num', + 'milvus_querycoord_querynode_num': 'querycoord.querynode_num', + 'milvus_querycoord_release_latency': 'querycoord.release.latency', + 'milvus_querycoord_release_req_count': 'querycoord.release.req', + 'milvus_querycoord_task_num': 'querycoord_task_num', + 'milvus_querynode_apply_bf_latency': 'querynode.apply_bf_latency', + 'milvus_querynode_collection_num': 'querynode.collection_num', + 'milvus_querynode_consume_bytes_counter': 'querynode.consume.bytes_counter', + 'milvus_querynode_consume_msg_count': 'querynode.consume.msg', + 'milvus_querynode_consume_tt_lag_ms': 'querynode.consume.tt_lag_ms', + 'milvus_querynode_disk_cache_evict_bytes': 'querynode.disk.cache.evict.bytes', + 'milvus_querynode_disk_cache_evict_duration': 'querynode.disk.cache.evict.duration', + 'milvus_querynode_disk_cache_evict_global_duration': 'querynode.disk.cache.evict.global_duration', + 'milvus_querynode_disk_cache_evict': 'querynode.disk.cache.evict', + 'milvus_querynode_disk_cache_load_bytes': 'querynode.disk.cache.load.bytes', + 'milvus_querynode_disk_cache_load_duration': 'querynode.disk.cache.load.duration', + 'milvus_querynode_disk_cache_load_global_duration': 'querynode.disk.cache.load.global_duration', + 'milvus_querynode_disk_cache_load': 'querynode.disk.cache.load', + 'milvus_querynode_disk_used_size': 'querynode.disk.used_size', + 'milvus_querynode_dml_vchannel_num': 'querynode.dml_vchannel_num', + 'milvus_querynode_entity_num': 'querynode.entity.num', + 'milvus_querynode_entity_size': 'querynode.entity.size', + 'milvus_querynode_execute_bytes_counter': 'querynode.execute_bytes_counter', + 'milvus_querynode_flowgraph_num': 'querynode.flowgraph_num', + 'milvus_querynode_forward_delete_latency': 'querynode.forward_delete_latency', + 'milvus_querynode_load_index_latency': 'querynode.load.index_latency', + 'milvus_querynode_load_segment_concurrency': 'querynode.load.segment.concurrency', + 'milvus_querynode_load_segment_latency': 'querynode.load.segment.latency', + 'milvus_querynode_msg_dispatcher_tt_lag_ms': 'querynode.msg_dispatcher_tt_lag_ms', + 'milvus_querynode_partition_num': 'querynode.partition_num', + 'milvus_querynode_process_insert_or_delete_latency': 
'querynode.process_insert_or_delete_latency', + 'milvus_querynode_read_task_concurrency': 'querynode.read_task.concurrency', + 'milvus_querynode_read_task_ready_len': 'querynode.read_task.ready_len', + 'milvus_querynode_read_task_unsolved_len': 'querynode.read_task.unsolved_len', + 'milvus_querynode_search_group_nq': 'querynode.search.group.nq', + 'milvus_querynode_search_group_size': 'querynode.search.group.size', + 'milvus_querynode_search_group_topk': 'querynode.search.group.topk', + 'milvus_querynode_search_nq': 'querynode.search.nq', + 'milvus_querynode_search_topk': 'querynode.search.topk', + 'milvus_querynode_segment_access_duration': 'querynode.segment.access.duration', + 'milvus_querynode_segment_access_global_duration': 'querynode.segment.access.global_duration', + 'milvus_querynode_segment_access': 'querynode.segment.access', + 'milvus_querynode_segment_access_wait_cache_duration': 'querynode.segment.access.wait_cache.duration', + 'milvus_querynode_segment_access_wait_cache_global_duration': 'querynode.segment.access.wait_cache.global_duration', + 'milvus_querynode_segment_access_wait_cache': 'querynode.segment.access.wait_cache', + 'milvus_querynode_segment_latency_per_vector': 'querynode.segment.latency_per_vector', + 'milvus_querynode_segment_num': 'querynode.segment.num', + 'milvus_querynode_sq_core_latency': 'querynode.sq.core_latency', + 'milvus_querynode_sq_queue_latency': 'querynode.sq.queue.latency', + 'milvus_querynode_sq_queue_user_latency': 'querynode.sq.queue.user_latency', + 'milvus_querynode_sq_reduce_latency': 'querynode.sq.reduce_latency', + 'milvus_querynode_sq_req_count': 'querynode.sq.req', + 'milvus_querynode_sq_req_latency': 'querynode.sq.req.latency', + 'milvus_querynode_sq_segment_latency': 'querynode.sq.segment_latency', + 'milvus_querynode_sq_wait_tsafe_latency': 'querynode.sq.wait_tsafe_latency', + 'milvus_querynode_wait_processing_msg_count': 'querynode.wait_processing_msg', + 'milvus_querynode_watch_dml_channel_latency': 'querynode.watch_dml_channel_latency', + 'milvus_rootcoord_collection_num': 'rootcoord.collection_num', + 'milvus_rootcoord_credential_num': 'rootcoord.credential_num', + 'milvus_rootcoord_ddl_req_count': 'rootcoord.ddl_req', + 'milvus_rootcoord_ddl_req_latency': 'rootcoord.ddl_req.latency', + 'milvus_rootcoord_ddl_req_latency_in_queue': 'rootcoord.ddl_req.latency_in_queue', + 'milvus_rootcoord_dml_channel_num': 'rootcoord.dml_channel_num', + 'milvus_rootcoord_entity_num': 'rootcoord.entity_num', + 'milvus_rootcoord_force_deny_writing_counter': 'rootcoord.force_deny_writing_counter', + 'milvus_rootcoord_id_alloc_count': 'rootcoord.id_alloc', + 'milvus_rootcoord_indexed_entity_num': 'rootcoord.indexed_entity_num', + 'milvus_rootcoord_msgstream_obj_num': 'rootcoord.msgstream_obj_num', + 'milvus_rootcoord_num_of_roles': 'rootcoord.num_of_roles', + 'milvus_rootcoord_partition_num': 'rootcoord.partition_num', + 'milvus_rootcoord_produce_tt_lag_ms': 'rootcoord.produce_tt_lag_ms', + 'milvus_rootcoord_proxy_num': 'rootcoord.proxy_num', + 'milvus_rootcoord_qn_mem_high_water_level': 'rootcoord.qn_mem_high_water_level', + 'milvus_rootcoord_sync_timetick_latency': 'rootcoord.sync_timetick_latency', + 'milvus_rootcoord_timestamp': 'rootcoord.timestamp', + 'milvus_rootcoord_timestamp_saved': 'rootcoord.timestamp_saved', + 'milvus_runtime_info': 'runtime_info', + 'milvus_storage_kv_size': 'storage.kv_size', + 'milvus_storage_op_count': 'storage.op', + 'milvus_storage_request_latency': 'storage.request_latency', + 'bf_search_cnt': 'bf_search_cnt', + 
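+    # Keys without the 'milvus_' prefix, such as 'bf_search_cnt' above, appear to
+    # come from the embedded Knowhere/segcore engine rather than the Go services;
+    # they are remapped and submitted under the 'milvus.' namespace all the same.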
'bitset_ratio': 'bitset_ratio', + 'build_latency': 'build_latency', + 'cache_hit_cnt': 'cache_hit_cnt', + 'diskann_range_search_iters': 'diskann.range_search_iters', + 'diskann_search_hops': 'diskann.search_hops', + 'diskann_bitset_ratio': 'diskann_bitset_ratio', + 'exec_latency': 'exec_latency', + 'filter_connectivity_ratio': 'filter.connectivity_ratio', + 'filter_mv_activated_fields_cnt': 'filter.mv.activated_fields_cnt', + 'filter_mv_change_base_cnt': 'filter.mv.change_base_cnt', + 'filter_mv_only_cnt': 'filter.mv.only_cnt', + 'filter_mv_supplement_ep_bool_cnt': 'filter.mv.supplement_ep_bool_cnt', + 'go_gc_duration_seconds': 'go.gc_duration_seconds', + 'go_goroutines': 'go.goroutines', + 'go_info': 'go.info', + 'go_memstats_alloc_bytes': {'name': 'go.memstats.alloc_bytes', 'type': 'native_dynamic'}, + 'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash_sys_bytes', + 'go_memstats_frees': 'go.memstats.frees', + 'go_memstats_gc_sys_bytes': 'go.memstats.gc_sys_bytes', + 'go_memstats_heap_alloc_bytes': 'go.memstats.heap.alloc_bytes', + 'go_memstats_heap_idle_bytes': 'go.memstats.heap.idle_bytes', + 'go_memstats_heap_inuse_bytes': 'go.memstats.heap.inuse_bytes', + 'go_memstats_heap_objects': 'go.memstats.heap.objects', + 'go_memstats_heap_released_bytes': 'go.memstats.heap.released_bytes', + 'go_memstats_heap_sys_bytes': 'go.memstats.heap.sys_bytes', + 'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time_seconds', + 'go_memstats_lookups': 'go.memstats.lookups', + 'go_memstats_mallocs': 'go.memstats.mallocs', + 'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache.inuse_bytes', + 'go_memstats_mcache_sys_bytes': 'go.memstats.mcache.sys_bytes', + 'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan.inuse_bytes', + 'go_memstats_mspan_sys_bytes': 'go.memstats.mspan.sys_bytes', + 'go_memstats_next_gc_bytes': 'go.memstats.next_gc_bytes', + 'go_memstats_other_sys_bytes': 'go.memstats.other_sys_bytes', + 'go_memstats_stack_inuse_bytes': 'go.memstats.stack.inuse_bytes', + 'go_memstats_stack_sys_bytes': 'go.memstats.stack.sys_bytes', + 'go_memstats_sys_bytes': 'go.memstats.sys_bytes', + 'go_threads': 'go.threads', + 'graph_search_cnt': 'graph_search_cnt', + 'hnsw_bitset_ratio': 'hnsw.bitset_ratio', + 'hnsw_search_hops': 'hnsw.search_hops', + 'internal_core_search_latency': 'internal.core_search_latency', + 'internal_mmap_allocated_space_bytes': 'internal.mmap.allocated_space_bytes', + 'internal_mmap_in_used_space_bytes': 'internal.mmap.in_used_space_bytes', + 'internal_storage_kv_size': 'internal.storage.kv_size', + 'internal_storage_load_duration': 'internal.storage.load_duration', + 'internal_storage_op_count': 'internal.storage.op', + 'internal_storage_request_latency': 'internal.storage.request_latency', + 'io_cnt': 'io_cnt', + 'ivf_search_cnt': 'ivf_search_cnt', + 'load_latency': 'load_latency', + 'process_cpu_seconds': 'process.cpu_seconds', + 'process_max_fds': 'process.max_fds', + 'process_open_fds': 'process.open_fds', + 'process_resident_memory_bytes': 'process.resident_memory_bytes', + 'process_start_time_seconds': {'name': 'process.start_time_seconds', 'type': 'time_elapsed'}, + 'process_virtual_memory_bytes': 'process.virtual_memory.bytes', + 'process_virtual_memory_max_bytes': 'process.virtual_memory.max_bytes', + 'quant_compute_cnt': 'quant.compute_cnt', + 'queue_latency': 'queue.latency', + 'range_search_latency': 'range_search_latency', + 'raw_compute_cnt': 'raw_compute_cnt', + 're_search_cnt': 're_search_cnt', + 'search_latency': 'search.latency', + 'search_topk': 
'search.topk', +} + +RENAME_LABELS_MAP = { + 'version': 'milvus_version', +} diff --git a/milvus/hatch.toml b/milvus/hatch.toml new file mode 100644 index 0000000000000..c85c5f07a7df2 --- /dev/null +++ b/milvus/hatch.toml @@ -0,0 +1,4 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] diff --git a/milvus/manifest.json b/milvus/manifest.json new file mode 100644 index 0000000000000..bf25c410ae353 --- /dev/null +++ b/milvus/manifest.json @@ -0,0 +1,51 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "38ddb395-6770-4b81-9730-e43cf4b4b2a0", + "app_id": "milvus", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Monitor the performance and usage of your Milvus deployments.", + "title": "Milvus", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::AI/ML", + "Offering::Integration", + "Submitted Data Type::Metrics", + "Submitted Data Type::Logs" + ] + }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 30880529, + "source_type_name": "Milvus", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "milvus.", + "check": "milvus.build_info", + "metadata_path": "metadata.csv" + }, + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/milvus/metadata.csv b/milvus/metadata.csv new file mode 100644 index 0000000000000..a845480cf1521 --- /dev/null +++ b/milvus/metadata.csv @@ -0,0 +1,387 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +milvus.bf_search_cnt.bucket,count,,,,Histogram bucket for the number of brute-force searches per request,0,milvus,,, +milvus.bf_search_cnt.count,count,,,,Count aggregation of brute-force searches per request,0,milvus,,, +milvus.bf_search_cnt.sum,count,,,,Sum aggregation of brute-force searches per request,0,milvus,,, +milvus.bitset_ratio.bucket,count,,,,Bitset ratio histogram bucket,0,milvus,,, +milvus.bitset_ratio.count,count,,,,Count of bitset ratio calculations,0,milvus,,, +milvus.bitset_ratio.sum,count,,,,Sum of bitset ratios,0,milvus,,, +milvus.build_info,gauge,,,,Build information of milvus,0,milvus,,, +milvus.build_latency.bucket,count,,,,Index build latency histogram bucket,0,milvus,,, +milvus.build_latency.count,count,,,,Count of index builds,0,milvus,,, +milvus.build_latency.sum,count,,second,,Sum of index build latencies(s),0,milvus,,, +milvus.cache_hit_cnt.bucket,count,,,,Histogram bucket for the cache hit count per request,0,milvus,,, +milvus.cache_hit_cnt.count,count,,,,Count aggregation of cache hit count per request,0,milvus,,, +milvus.cache_hit_cnt.sum,count,,,,Sum aggregation of cache hit count per request,0,milvus,,, +milvus.cgo.active_future_total,gauge,,,,Total number of active futures.,0,milvus,,, +milvus.cgo.cgo_duration_seconds.bucket,count,,,,Histogram bucket of cgo call duration in seconds,0,milvus,,, +milvus.cgo.cgo_duration_seconds.count,count,,,,Count of cgo calls,0,milvus,,, +milvus.cgo.cgo_duration_seconds.sum,count,,second,,Sum of cgo call duration in seconds.,0,milvus,,, 
+milvus.cgo.cgo_queue_duration_seconds.bucket,count,,,,Histogram bucket for the duration of cgo calls in queue,0,milvus,,,
+milvus.cgo.cgo_queue_duration_seconds.count,count,,,,Count of cgo calls in queue,0,milvus,,,
+milvus.cgo.cgo_queue_duration_seconds.sum,count,,second,,Sum of the duration of cgo calls in queue,0,milvus,,,
+milvus.cgo.running_cgo_call_total,gauge,,,,Total number of running cgo calls.,0,milvus,,,
+milvus.datacoord.channel_checkpoint_unix_seconds,gauge,,,,Channel checkpoint timestamp in Unix seconds,0,milvus,,,
+milvus.datacoord.collection_num,gauge,,,,Number of collections,0,milvus,,,
+milvus.datacoord.consume_datanode_tt_lag_ms,gauge,,millisecond,,Now time minus time travel per physical channel,0,milvus,,,
+milvus.datacoord.datanode_num,gauge,,,,Number of data nodes,0,milvus,,,
+milvus.datacoord.import_tasks,gauge,,,,The import tasks grouped by type and state,0,milvus,,,
+milvus.datacoord.index.node_num,gauge,,,,Number of IndexNodes managed by IndexCoord,0,milvus,,,
+milvus.datacoord.index.req.count,count,,,,Number of building index requests,0,milvus,,,
+milvus.datacoord.index.task,gauge,,,,Number of index tasks of each type,0,milvus,,,
+milvus.datacoord.segment_num,gauge,,,,Number of segments,0,milvus,,,
+milvus.datacoord.stored.binlog_size,gauge,,byte,,Binlog size of healthy segments,0,milvus,,,
+milvus.datacoord.stored.index_files_size,gauge,,byte,,Index files size of the segments,0,milvus,,,
+milvus.datacoord.stored.rows_num,gauge,,,,Number of stored rows of healthy segments,0,milvus,,,
+milvus.datacoord.task_execute_max_latency.bucket,count,,,,Histogram bucket for the latency of task execute operation,0,milvus,,,
+milvus.datacoord.task_execute_max_latency.count,count,,,,Count of task execute operations,0,milvus,,,
+milvus.datacoord.task_execute_max_latency.sum,count,,millisecond,,Sum of the latency of task execute operations,0,milvus,,,
+milvus.datacoord.watched_dml_chanel_num,gauge,,,,Number of data manipulation language (DML) channels watched by the datanode,0,milvus,,,
+milvus.datanode.autoflush_buffer_op.count,count,,,,Count of auto flush buffer operations,0,milvus,,,
+milvus.datanode.consume.bytes.count,count,,,,Count of consumed bytes,0,milvus,,,
+milvus.datanode.consume.msg.count,count,,,,Count of consumed messages,0,milvus,,,
+milvus.datanode.consume.tt_lag_ms,gauge,,millisecond,,Now time minus time travel per physical channel,0,milvus,,,
+milvus.datanode.encode_buffer_latency.bucket,count,,,,Histogram bucket for the latency of encode buffer data,0,milvus,,,
+milvus.datanode.encode_buffer_latency.count,count,,,,Count of encode buffer data operations,0,milvus,,,
+milvus.datanode.encode_buffer_latency.sum,count,,millisecond,,Sum of the latency of encode buffer data operations,0,milvus,,,
+milvus.datanode.flowgraph_num,gauge,,,,Number of flowgraphs,0,milvus,,,
+milvus.datanode.flush.buffer_op.count,count,,,,Count of flush buffer operations,0,milvus,,,
+milvus.datanode.flush.req.count,count,,,,Count of flush requests,0,milvus,,,
+milvus.datanode.flushed_data.rows.count,count,,,,Number of rows flushed to storage,0,milvus,,,
+milvus.datanode.flushed_data.size.count,count,,byte,,Byte size of data flushed to storage,0,milvus,,,
+milvus.datanode.msg.dispatcher_tt_lag_ms,gauge,,millisecond,,Current time minus the dispatcher's current consume time,0,milvus,,,
+milvus.datanode.msg.rows.count,count,,,,Count of rows consumed from msgStream,0,milvus,,,
+milvus.datanode.save_latency.bucket,count,,,,Histogram bucket for the latency of saving flush data to storage,0,milvus,,,
+milvus.datanode.save_latency.count,count,,,,Count of saving flush data to storage events,0,milvus,,, +milvus.datanode.save_latency.sum,count,,millisecond,,Sum of the latency of saving flush data to storage events,0,milvus,,, +milvus.diskann.range_search_iters.bucket,count,,,,Histogram bucket for DISKANN range search iterations,0,milvus,,, +milvus.diskann.range_search_iters.count,count,,,,Count of DISKANN range search events,0,milvus,,, +milvus.diskann.range_search_iters.sum,count,,,,Sum of DISKANN range search iterations,0,milvus,,, +milvus.diskann.search_hops.bucket,count,,,,Histogram for DISKANN search hops,0,milvus,,, +milvus.diskann.search_hops.count,count,,,,Count of DISKANN search hop events,0,milvus,,, +milvus.diskann.search_hops.sum,count,,,,Sum of DISKANN search hops,0,milvus,,, +milvus.diskann_bitset_ratio.bucket,count,,,,Histogram bucket for the DISKANN bitset ratio for search and range search,0,milvus,,, +milvus.diskann_bitset_ratio.count,count,,,,Count of the DISKANN bitset ratio operations for search and range search,0,milvus,,, +milvus.diskann_bitset_ratio.sum,count,,,,Sum of the DISKANN bitset ratio for search and range search,0,milvus,,, +milvus.exec_latency.bucket,count,,,,Histogram bucket for the execute latency per request,0,milvus,,, +milvus.exec_latency.count,count,,,,Count aggregation of the execute latency per request,0,milvus,,, +milvus.exec_latency.sum,count,,,,Sum aggregation of the execute latency per request,0,milvus,,, +milvus.filter.connectivity_ratio.bucket,count,,,,Histogram bucket for the average connectivity ratio set under filtering per request,0,milvus,,, +milvus.filter.connectivity_ratio.count,count,,,,Count of the connectivity ratios set under filtering per request,0,milvus,,, +milvus.filter.connectivity_ratio.sum,count,,,,Sum of the average connectivity ratios set under filtering per request,0,milvus,,, +milvus.filter.mv.activated_fields_cnt.bucket,count,,,,Histogram bucket of the average materialized view activated fields per request,0,milvus,,, +milvus.filter.mv.activated_fields_cnt.count,count,,,,Count of the requests activating materialized view fields,0,milvus,,, +milvus.filter.mv.activated_fields_cnt.sum,count,,,,Sum of the average materialized view activated fields per request,0,milvus,,, +milvus.filter.mv.change_base_cnt.bucket,count,,,,Histogram bucket for the materialized view change base count per request,0,milvus,,, +milvus.filter.mv.change_base_cnt.count,count,,,,Count of requests triggering materialized view change base events,0,milvus,,, +milvus.filter.mv.change_base_cnt.sum,count,,,,Sum of materialized view change base counts per request,0,milvus,,, +milvus.filter.mv.only_cnt.bucket,count,,,,Histogram bucket for the materialized view only count per request,0,milvus,,, +milvus.filter.mv.only_cnt.count,count,,,,Count of requests that trigger materialized view only events,0,milvus,,, +milvus.filter.mv.only_cnt.sum,count,,,,Sum of materialized view only counts,0,milvus,,, +milvus.filter.mv.supplement_ep_bool_cnt.bucket,count,,,,Histogram bucket for materialized view supplement entry point from bitset boolean count per request,0,milvus,,, +milvus.filter.mv.supplement_ep_bool_cnt.count,count,,,,Count of requests triggering a materialized view supplement entry point from bitset boolean event,0,milvus,,, +milvus.filter.mv.supplement_ep_bool_cnt.sum,count,,,,Sum of materialized view supplement entry point from bitset boolean events,0,milvus,,, +milvus.flushed_segment_file_num.bucket,count,,,,Histogram bucket for the number of files for 
flushed segment,0,milvus,,, +milvus.flushed_segment_file_num.count,count,,,,Count of flushed segment events,0,milvus,,, +milvus.flushed_segment_file_num.sum,count,,,,Sum of the number of files for flushed segments,0,milvus,,, +milvus.go.gc_duration_seconds.count,count,,,,A summary of the pause duration of garbage collection cycles.,0,milvus,,, +milvus.go.gc_duration_seconds.quantile,gauge,,,,A summary of the pause duration of garbage collection cycles.,0,milvus,,, +milvus.go.gc_duration_seconds.sum,count,,,,A summary of the pause duration of garbage collection cycles.,0,milvus,,, +milvus.go.goroutines,gauge,,,,Number of goroutines that currently exist.,0,milvus,,, +milvus.go.info,gauge,,,,Information about the Go environment.,0,milvus,,, +milvus.go.memstats.alloc_bytes,gauge,,,,Number of bytes allocated and still in use.,0,milvus,,, +milvus.go.memstats.alloc_bytes.count,count,,,,"Total number of bytes allocated, even if freed.",0,milvus,,, +milvus.go.memstats.buck_hash_sys_bytes,gauge,,,,Number of bytes used by the profiling bucket hash table.,0,milvus,,, +milvus.go.memstats.frees.count,count,,,,Total number of frees.,0,milvus,,, +milvus.go.memstats.gc_sys_bytes,gauge,,,,Number of bytes used for garbage collection system metadata.,0,milvus,,, +milvus.go.memstats.heap.alloc_bytes,gauge,,,,Number of heap bytes allocated and still in use.,0,milvus,,, +milvus.go.memstats.heap.idle_bytes,gauge,,,,Number of heap bytes waiting to be used.,0,milvus,,, +milvus.go.memstats.heap.inuse_bytes,gauge,,,,Number of heap bytes that are in use.,0,milvus,,, +milvus.go.memstats.heap.objects,gauge,,,,Number of allocated objects.,0,milvus,,, +milvus.go.memstats.heap.released_bytes,gauge,,,,Number of heap bytes released to OS.,0,milvus,,, +milvus.go.memstats.heap.sys_bytes,gauge,,,,Number of heap bytes obtained from system.,0,milvus,,, +milvus.go.memstats.last_gc_time_seconds,gauge,,,,Number of seconds since 1970 of last garbage collection.,0,milvus,,, +milvus.go.memstats.lookups.count,count,,,,Total number of pointer lookups.,0,milvus,,, +milvus.go.memstats.mallocs.count,count,,,,Total number of mallocs.,0,milvus,,, +milvus.go.memstats.mcache.inuse_bytes,gauge,,,,Number of bytes in use by mcache structures.,0,milvus,,, +milvus.go.memstats.mcache.sys_bytes,gauge,,,,Number of bytes used for mcache structures obtained from system.,0,milvus,,, +milvus.go.memstats.mspan.inuse_bytes,gauge,,,,Number of bytes in use by mspan structures.,0,milvus,,, +milvus.go.memstats.mspan.sys_bytes,gauge,,,,Number of bytes used for mspan structures obtained from system.,0,milvus,,, +milvus.go.memstats.next_gc_bytes,gauge,,,,Number of heap bytes when next garbage collection will take place.,0,milvus,,, +milvus.go.memstats.other_sys_bytes,gauge,,,,Number of bytes used for other system allocations.,0,milvus,,, +milvus.go.memstats.stack.inuse_bytes,gauge,,,,Number of bytes in use by the stack allocator.,0,milvus,,, +milvus.go.memstats.stack.sys_bytes,gauge,,,,Number of bytes obtained from system for stack allocator.,0,milvus,,, +milvus.go.memstats.sys_bytes,gauge,,,,Number of bytes obtained from system.,0,milvus,,, +milvus.go.threads,gauge,,,,Number of OS threads created.,0,milvus,,, +milvus.graph_search_cnt.bucket,count,,,,Histogram bucket for the number of graph search per request,0,milvus,,, +milvus.graph_search_cnt.count,count,,,,Count of requests that trigger a graph search,0,milvus,,, +milvus.graph_search_cnt.sum,count,,,,Sum of graph searches,0,milvus,,, +milvus.hnsw.bitset_ratio.bucket,count,,,,Histogram bucket of the HNSW bitset 
ratio for search and range search,0,milvus,,,
+milvus.hnsw.bitset_ratio.count,count,,,,Count aggregation of the HNSW bitset ratio for search and range search,0,milvus,,,
+milvus.hnsw.bitset_ratio.sum,count,,,,Sum aggregation of the HNSW bitset ratio for search and range search,0,milvus,,,
+milvus.hnsw.search_hops.bucket,count,,,,Histogram bucket of the HNSW search hops in layer 0,0,milvus,,,
+milvus.hnsw.search_hops.count,count,,,,Count aggregation of the HNSW search hops in layer 0,0,milvus,,,
+milvus.hnsw.search_hops.sum,count,,,,Sum aggregation of the HNSW search hops in layer 0,0,milvus,,,
+milvus.indexnode.build_index_latency.bucket,count,,,,Histogram bucket of the latency of build index for segment,0,milvus,,,
+milvus.indexnode.build_index_latency.count,count,,,,Count of build index events,0,milvus,,,
+milvus.indexnode.build_index_latency.sum,count,,millisecond,,Sum of the latency of build index events,0,milvus,,,
+milvus.indexnode.encode_index_latency.bucket,count,,,,Histogram bucket of the latency of encoding the index file,0,milvus,,,
+milvus.indexnode.encode_index_latency.count,count,,,,Count of the index file encoding events,0,milvus,,,
+milvus.indexnode.encode_index_latency.sum,count,,millisecond,,Sum of the latency of index file encoding events,0,milvus,,,
+milvus.indexnode.index.task.count,count,,,,Number of tasks that index node received,0,milvus,,,
+milvus.indexnode.index.task_latency_in_queue.bucket,count,,,,Histogram bucket for the latency of index task in queue,0,milvus,,,
+milvus.indexnode.index.task_latency_in_queue.count,count,,,,Count of index task in queue,0,milvus,,,
+milvus.indexnode.index.task_latency_in_queue.sum,count,,millisecond,,Sum of the latency of index task in queue,0,milvus,,,
+milvus.indexnode.knowhere_build_index_latency.bucket,count,,,,Histogram bucket for the latency of building the index by knowhere,0,milvus,,,
+milvus.indexnode.knowhere_build_index_latency.count,count,,,,Count of index builds by knowhere,0,milvus,,,
+milvus.indexnode.knowhere_build_index_latency.sum,count,,millisecond,,Sum of the latency of building the index by knowhere,0,milvus,,,
+milvus.indexnode.save_index_latency.bucket,count,,,,Histogram bucket for the latency of saving the index file,0,milvus,,,
+milvus.indexnode.save_index_latency.count,count,,,,Count of index file save events,0,milvus,,,
+milvus.indexnode.save_index_latency.sum,count,,millisecond,,Sum of the latency of saving the index file,0,milvus,,,
+milvus.internal.core_search_latency.bucket,count,,,,Histogram bucket for the [cpp]latency(us) of search on segment,0,milvus,,,
+milvus.internal.core_search_latency.count,count,,,,Count aggregation of [cpp]latency(us) of search on segment,0,milvus,,,
+milvus.internal.core_search_latency.sum,count,,microsecond,,Sum aggregation of [cpp]latency(us) of search on segment,0,milvus,,,
+milvus.internal.mmap.allocated_space_bytes.bucket,count,,,,Histogram bucket for the [cpp]mmap allocated space stats,0,milvus,,,
+milvus.internal.mmap.allocated_space_bytes.count,count,,,,Count aggregation of [cpp]mmap allocated space stats,0,milvus,,,
+milvus.internal.mmap.allocated_space_bytes.sum,count,,,,Sum aggregation of [cpp]mmap allocated space stats,0,milvus,,,
+milvus.internal.mmap.in_used_space_bytes,gauge,,,,[cpp]mmap in used space stats,0,milvus,,,
+milvus.internal.storage.kv_size.bucket,count,,,,Histogram bucket for the [cpp]key-value size stats,0,milvus,,,
+milvus.internal.storage.kv_size.count,count,,,,Count aggregation of [cpp]key-value size stats,0,milvus,,,
+milvus.internal.storage.kv_size.sum,count,,,,Sum aggregation of [cpp]key-value size stats,0,milvus,,, +milvus.internal.storage.load_duration.bucket,count,,,,Histogram bucket for the [cpp]durations of load segment,0,milvus,,, +milvus.internal.storage.load_duration.count,count,,,,Count aggregation of [cpp]durations of load segment,0,milvus,,, +milvus.internal.storage.load_duration.sum,count,,,,Sum aggregation of [cpp]durations of load segment,0,milvus,,, +milvus.internal.storage.op.count,count,,,,[cpp]count of persistent data operation,0,milvus,,, +milvus.internal.storage.request_latency.bucket,count,,,,Histogram bucket for the [cpp]request latency(ms) on the client side,0,milvus,,, +milvus.internal.storage.request_latency.count,count,,,,Count aggregation of [cpp]request latency(ms) on the client side,0,milvus,,, +milvus.internal.storage.request_latency.sum,count,,millisecond,,Sum aggregation of [cpp]request latency(ms) on the client side,0,milvus,,, +milvus.io_cnt.bucket,count,,,,Histogram bucket for the IO count per request,0,milvus,,, +milvus.io_cnt.count,count,,,,Count of requests triggering IO operations,0,milvus,,, +milvus.io_cnt.sum,count,,,,Sum of IO operation counts,0,milvus,,, +milvus.ivf_search_cnt.bucket,count,,,,Histogram bucket for the number of inverted file searches per request,0,milvus,,, +milvus.ivf_search_cnt.count,count,,,,Count of requests triggering an inverted file search,0,milvus,,, +milvus.ivf_search_cnt.sum,count,,,,Sum of inverted file searches,0,milvus,,, +milvus.load_latency.bucket,count,,,,Histogram bucket for the index load latency (ms),0,milvus,,, +milvus.load_latency.count,count,,,,Count of index load events,0,milvus,,, +milvus.load_latency.sum,count,,millisecond,,Sum of index load latency (ms),0,milvus,,, +milvus.meta.kv_size.bucket,count,,,,Histogram bucket for the key-value size stats,0,milvus,,, +milvus.meta.kv_size.count,count,,,,Count aggregation of the key-value size stats,0,milvus,,, +milvus.meta.kv_size.sum,count,,,,Sum aggregation of the key-value size stats,0,milvus,,, +milvus.meta.op.count,count,,,,Count of meta operations,0,milvus,,, +milvus.meta.request_latency.bucket,count,,,,Histogram bucket for the request latency on the client side,0,milvus,,, +milvus.meta.request_latency.count,count,,,,Count aggregation of the request latency on the client side,0,milvus,,, +milvus.meta.request_latency.sum,count,,millisecond,,Sum aggregation of the request latency on the client side,0,milvus,,, +milvus.msg_queue_consumer_num,gauge,,,,Number of consumers,0,milvus,,, +milvus.msgstream.op.count,count,,,,Count of stream message operations,0,milvus,,, +milvus.msgstream.request_latency.bucket,count,,,,Histogram bucket for the request latency on the client side,0,milvus,,, +milvus.msgstream.request_latency.count,count,,,,Count aggregation of the request latency on the client side,0,milvus,,, +milvus.msgstream.request_latency.sum,count,,millisecond,,Sum aggregation of the request latency on the client side,0,milvus,,, +milvus.num_node,gauge,,,,Number of nodes and coordinates,0,milvus,,, +milvus.process.cpu_seconds.count,count,,,,Total user and system CPU time spent in seconds.,0,milvus,,, +milvus.process.max_fds,gauge,,,,Maximum number of open file descriptors.,0,milvus,,, +milvus.process.open_fds,gauge,,,,Number of open file descriptors.,0,milvus,,, +milvus.process.resident_memory_bytes,gauge,,,,Resident memory size in bytes.,0,milvus,,, +milvus.process.start_time_seconds,gauge,,,,Start time of the process since Unix epoch in seconds.,0,milvus,,,
+milvus.process.virtual_memory.bytes,gauge,,,,Virtual memory size in bytes.,0,milvus,,, +milvus.process.virtual_memory.max_bytes,gauge,,,,Maximum amount of virtual memory available in bytes.,0,milvus,,, +milvus.proxy.apply.pk_latency.bucket,count,,,,"Histogram bucket for the latency of 'apply primary key' events",0,milvus,,, +milvus.proxy.apply.pk_latency.count,count,,,,"Count of 'apply primary key' events",0,milvus,,, +milvus.proxy.apply.pk_latency.sum,count,,millisecond,,"Sum of latencies for 'apply primary key' events",0,milvus,,, +milvus.proxy.apply.timestamp_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'apply timestamp' events",0,milvus,,, +milvus.proxy.apply.timestamp_latency.count,count,,,,"Count of proxy 'apply timestamp' events",0,milvus,,, +milvus.proxy.apply.timestamp_latency.sum,count,,millisecond,,"Sum of latencies of proxy 'apply timestamp' events",0,milvus,,, +milvus.proxy.assign_segmentID_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'get segmentID from dataCoord' events",0,milvus,,, +milvus.proxy.assign_segmentID_latency.count,count,,,,"Count of proxy 'get segmentID from dataCoord' events",0,milvus,,, +milvus.proxy.assign_segmentID_latency.sum,count,,millisecond,,"Sum of latencies for proxy 'get segmentID from dataCoord' events",0,milvus,,, +milvus.proxy.cache.hit.count,count,,,,Count of cache hits/misses,0,milvus,,, +milvus.proxy.cache.update_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'update cache when cache miss' events",0,milvus,,, +milvus.proxy.cache.update_latency.count,count,,,,"Count of proxy 'update cache when cache miss' events",0,milvus,,, +milvus.proxy.cache.update_latency.sum,count,,millisecond,,"Sum of latencies of proxy 'update cache when cache miss'",0,milvus,,, +milvus.proxy.delete_vectors.count,count,,,,Counter of vectors successfully deleted,0,milvus,,, +milvus.proxy.msgstream_obj_num,gauge,,,,Number of MsgStream objects per physical channel,0,milvus,,, +milvus.proxy.mutation_send_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'send insert request to MsgStream' events",0,milvus,,, +milvus.proxy.mutation_send_latency.count,count,,,,"Count of proxy 'send insert request to MsgStream' events",0,milvus,,, +milvus.proxy.mutation_send_latency.sum,count,,millisecond,,"Sum of latencies of proxy 'send insert request to MsgStream' events",0,milvus,,, +milvus.proxy.rate_limit_req.count,count,,,,Count of operations executed,0,milvus,,, +milvus.proxy.report_value.count,count,,,,Report value about the request,0,milvus,,, +milvus.proxy.req.count,count,,,,Count of operations executed,0,milvus,,, +milvus.proxy.req.in_queue_latency.bucket,count,,,,Histogram bucket for the time requests spend in queue,0,milvus,,, +milvus.proxy.req.in_queue_latency.count,count,,,,Count of requests in queue,0,milvus,,, +milvus.proxy.req.in_queue_latency.sum,count,,millisecond,,Sum of the time requests spend in queue,0,milvus,,, +milvus.proxy.req.latency.bucket,count,,,,Histogram bucket for the latency of each request,0,milvus,,, +milvus.proxy.req.latency.count,count,,,,Count of request latency events,0,milvus,,, +milvus.proxy.req.latency.sum,count,,millisecond,,Sum of latencies of requests,0,milvus,,, +milvus.proxy.send_bytes.count,count,,,,Count of bytes sent back to the SDK,0,milvus,,, +milvus.proxy.sq.decode_result_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'decode the search result' events",0,milvus,,, +milvus.proxy.sq.decode_result_latency.count,count,,,,"Count of proxy 'decode the search
result' events",0,milvus,,, +milvus.proxy.sq.decode_result_latency.sum,count,,millisecond,,"Sum of latencies for proxy 'decode the search result' events",0,milvus,,, +milvus.proxy.sq.reduce_result_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'reduces search result' events",0,milvus,,, +milvus.proxy.sq.reduce_result_latency.count,count,,,,"Count of proxy 'reduces search result' events",0,milvus,,, +milvus.proxy.sq.reduce_result_latency.sum,count,,millisecond,,"Sum of latencies for proxy 'reduces search result' events",0,milvus,,, +milvus.proxy.sq.wait_result_latency.bucket,count,,,,"Histogram bucket for the latency of proxy 'waits for the result' events",0,milvus,,, +milvus.proxy.sq.wait_result_latency.count,count,,,,"Count of proxy 'waits for the result' events",0,milvus,,, +milvus.proxy.sq.wait_result_latency.sum,count,,millisecond,,"Sum of latencies for proxy 'waits for the result' events",0,milvus,,, +milvus.proxy.sync_segment_request_length.bucket,count,,,,Histogram bucket for the length of SegmentIDRequests when assigning segments for insert,0,milvus,,, +milvus.proxy.sync_segment_request_length.count,count,,,,Count of assigning segments for insert events,0,milvus,,, +milvus.proxy.sync_segment_request_length.sum,count,,,,Sum of the length of SegmentIDRequests when assigning segments for insert,0,milvus,,, +milvus.proxy.tt_lag_ms,gauge,,millisecond,,Now time minus time travel per physical channel,0,milvus,,, +milvus.quant.compute_cnt.bucket,count,,,,Histogram bucket for the quant compute count per request,0,milvus,,, +milvus.quant.compute_cnt.count,count,,,,Count of requests triggering quant compute events,0,milvus,,, +milvus.quant.compute_cnt.sum,count,,,,Sum of quant compute event counts,0,milvus,,, +milvus.querycoord.collection_num,gauge,,,,Number of collections,0,milvus,,, +milvus.querycoord.current_target_checkpoint_unix_seconds,gauge,,,,Current target checkpoint timestamp in Unix seconds,0,milvus,,, +milvus.querycoord.load.latency.bucket,count,,,,Histogram bucket for the latency of loading the entire collection,0,milvus,,, +milvus.querycoord.load.latency.count,count,,,,Count of collection load events,0,milvus,,, +milvus.querycoord.load.latency.sum,count,,millisecond,,Sum of the latencies of loading the entire collection,0,milvus,,, +milvus.querycoord.load.req.count,count,,,,Count of load requests,0,milvus,,, +milvus.querycoord.partition_num,gauge,,,,Number of partitions,0,milvus,,, +milvus.querycoord.querynode_num,gauge,,,,Number of QueryNodes managed by QueryCoord,0,milvus,,, +milvus.querycoord.release.latency.bucket,count,,,,Histogram bucket for the latency of release requests,0,milvus,,, +milvus.querycoord.release.latency.count,count,,,,Count of release request events,0,milvus,,, +milvus.querycoord.release.latency.sum,count,,millisecond,,Sum of the latencies of release request events,0,milvus,,, +milvus.querycoord.release.req.count,count,,,,Count of release requests,0,milvus,,, +milvus.querycoord_task_num,gauge,,,,Number of tasks in QueryCoord's scheduler,0,milvus,,, +milvus.querynode.apply_bf_latency.bucket,count,,,,Histogram bucket for the apply brute-force cost in ms,0,milvus,,, +milvus.querynode.apply_bf_latency.count,count,,,,Count of apply brute-force events,0,milvus,,, +milvus.querynode.apply_bf_latency.sum,count,,millisecond,,Sum of the apply brute-force cost in ms,0,milvus,,, +milvus.querynode.collection_num,gauge,,,,Number of collections loaded,0,milvus,,, +milvus.querynode.consume.bytes_counter.count,count,,,,Count of consumed bytes,0,milvus,,,
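Each .bucket/.count/.sum triplet in this file is derived from a single Prometheus histogram exposed by Milvus (summaries instead yield .quantile/.count/.sum), so the check only declares one mapping entry per raw metric name and the OpenMetrics V2 base class fans it out. A minimal sketch of that wiring, assuming a hypothetical MilvusCheck class; the raw names below appear in the test fixture, and the shipped metric map is far larger:

from datadog_checks.base import OpenMetricsBaseCheckV2

# Illustrative subset of the metric map: raw Prometheus name -> Datadog name.
# One histogram entry fans out into the .bucket, .count, and .sum rows above;
# a summary such as go_gc_duration_seconds yields .quantile, .count, and .sum.
METRIC_MAP = {
    'go_goroutines': 'go.goroutines',
    'go_gc_duration_seconds': 'go.gc_duration_seconds',
    'milvus_cgo_cgo_duration_seconds': 'cgo.cgo_duration_seconds',
}


class MilvusCheck(OpenMetricsBaseCheckV2):
    __NAMESPACE__ = 'milvus'  # every submitted metric is prefixed with 'milvus.'
    DEFAULT_METRIC_LIMIT = 0  # the full set is enumerated in metadata.csv instead

    def get_default_config(self):
        return {'metrics': [METRIC_MAP]}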
+milvus.querynode.consume.msg.count,count,,,,Count of consumed messages,0,milvus,,, +milvus.querynode.consume.tt_lag_ms,gauge,,millisecond,,Now time minus time travel per physical channel,0,milvus,,, +milvus.querynode.disk.cache.evict.bytes.count,count,,byte,,Number of bytes evicted from disk cache,0,milvus,,, +milvus.querynode.disk.cache.evict.count,count,,,,Number of segments evicted from disk cache,0,milvus,,, +milvus.querynode.disk.cache.evict.duration.count,count,,,,Total time cost of evicting segments from disk cache,0,milvus,,, +milvus.querynode.disk.cache.evict.global_duration.bucket,count,,,,Histogram bucket for the global duration of evicting segments from disk cache,0,milvus,,, +milvus.querynode.disk.cache.evict.global_duration.count,count,,,,Count of evicting segments from disk cache events,0,milvus,,, +milvus.querynode.disk.cache.evict.global_duration.sum,count,,millisecond,,Sum of the global duration of evicting segments from disk cache,0,milvus,,, +milvus.querynode.disk.cache.load.bytes.count,count,,byte,,Number of bytes loaded from disk cache,0,milvus,,, +milvus.querynode.disk.cache.load.count,count,,,,Number of segments loaded from disk cache,0,milvus,,, +milvus.querynode.disk.cache.load.duration.count,count,,,,Total time cost of loading segments from disk cache,0,milvus,,, +milvus.querynode.disk.cache.load.global_duration.bucket,count,,,,Histogram bucket for the global duration of loading segments from disk cache,0,milvus,,, +milvus.querynode.disk.cache.load.global_duration.count,count,,,,Count of loading segments from disk cache events,0,milvus,,, +milvus.querynode.disk.cache.load.global_duration.sum,count,,millisecond,,Sum of the global duration of loading segments from disk cache,0,milvus,,, +milvus.querynode.disk.used_size,gauge,,mebibyte,,Disk used size (MB),0,milvus,,, +milvus.querynode.dml_vchannel_num,gauge,,,,Number of data manipulation language channels watched,0,milvus,,, +milvus.querynode.entity.num,gauge,,,,"Number of entities which can be searched/queried, clustered by collection, partition and state",0,milvus,,, +milvus.querynode.entity.size,gauge,,byte,,"Entities' memory size, clustered by collection and state",0,milvus,,, +milvus.querynode.execute_bytes_counter.count,count,,,,Count of executed bytes,0,milvus,,, +milvus.querynode.flowgraph_num,gauge,,,,Number of flowgraphs,0,milvus,,, +milvus.querynode.forward_delete_latency.bucket,count,,,,Histogram bucket for the forward delete cost in ms,0,milvus,,, +milvus.querynode.forward_delete_latency.count,count,,,,Count of forward delete events,0,milvus,,, +milvus.querynode.forward_delete_latency.sum,count,,millisecond,,Sum of the forward delete cost in ms,0,milvus,,, +milvus.querynode.load.index_latency.bucket,count,,,,"Histogram bucket for the latency of load per segment's index, in milliseconds",0,milvus,,, +milvus.querynode.load.index_latency.count,count,,,,"Count of load per segment's index events",0,milvus,,, +milvus.querynode.load.index_latency.sum,count,,millisecond,,"Sum of the latencies of load per segment's index events, in milliseconds",0,milvus,,, +milvus.querynode.load.segment.concurrency,gauge,,,,Number of concurrent loading segments in QueryNode,0,milvus,,, +milvus.querynode.load.segment.latency.bucket,count,,,,Histogram bucket for the latency of load per segment,0,milvus,,, +milvus.querynode.load.segment.latency.count,count,,,,Count of load per segment events,0,milvus,,, +milvus.querynode.load.segment.latency.sum,count,,millisecond,,Sum of the latencies of load per segment events,0,milvus,,,
+milvus.querynode.msg_dispatcher_tt_lag_ms,gauge,,millisecond,,time.Now() minus the dispatcher's current consume time,0,milvus,,, +milvus.querynode.partition_num,gauge,,,,Number of partitions loaded,0,milvus,,, +milvus.querynode.process_insert_or_delete_latency.bucket,count,,,,Histogram bucket for the process insert or delete cost in ms,0,milvus,,, +milvus.querynode.process_insert_or_delete_latency.count,count,,,,Count of process insert or delete events,0,milvus,,, +milvus.querynode.process_insert_or_delete_latency.sum,count,,millisecond,,Sum of process insert or delete cost in ms,0,milvus,,, +milvus.querynode.read_task.concurrency,gauge,,,,Number of concurrent executing read tasks in QueryNode,0,milvus,,, +milvus.querynode.read_task.ready_len,gauge,,,,Number of ready read tasks in readyQueue,0,milvus,,, +milvus.querynode.read_task.unsolved_len,gauge,,,,Number of unsolved read tasks in unsolvedQueue,0,milvus,,, +milvus.querynode.search.group.nq.bucket,count,,,,Histogram bucket for the number of queries of each grouped search task,0,milvus,,, +milvus.querynode.search.group.nq.count,count,,,,Count of grouped search tasks,0,milvus,,, +milvus.querynode.search.group.nq.sum,count,,,,Sum of the number of queries of grouped search tasks,0,milvus,,, +milvus.querynode.search.group.size.bucket,count,,,,Histogram bucket for the number of tasks of each grouped search task,0,milvus,,, +milvus.querynode.search.group.size.count,count,,,,Count of grouped search tasks,0,milvus,,, +milvus.querynode.search.group.size.sum,count,,,,Sum of the number of tasks of grouped search tasks,0,milvus,,, +milvus.querynode.search.group.topk.bucket,count,,,,Histogram bucket for the topK of each grouped search task,0,milvus,,, +milvus.querynode.search.group.topk.count,count,,,,Count of grouped search tasks,0,milvus,,, +milvus.querynode.search.group.topk.sum,count,,,,Sum of the topK of grouped search tasks,0,milvus,,, +milvus.querynode.search.nq.bucket,count,,,,Histogram bucket for the number of queries of each search task,0,milvus,,, +milvus.querynode.search.nq.count,count,,,,Count of search tasks,0,milvus,,, +milvus.querynode.search.nq.sum,count,,,,Sum of the number of queries of search tasks,0,milvus,,, +milvus.querynode.search.topk.bucket,count,,,,Histogram bucket for the topK of each search task,0,milvus,,, +milvus.querynode.search.topk.count,count,,,,Count of search tasks,0,milvus,,, +milvus.querynode.search.topk.sum,count,,,,Sum of the topK of search tasks,0,milvus,,, +milvus.querynode.segment.access.count,count,,,,Number of segments accessed,0,milvus,,, +milvus.querynode.segment.access.duration.count,count,,,,Total time cost of accessing segments,0,milvus,,, +milvus.querynode.segment.access.global_duration.bucket,count,,,,Histogram bucket for the global time cost of accessing segments,0,milvus,,, +milvus.querynode.segment.access.global_duration.count,count,,,,Count of accessing segments events,0,milvus,,, +milvus.querynode.segment.access.global_duration.sum,count,,millisecond,,Sum of the global time cost of accessing segments,0,milvus,,, +milvus.querynode.segment.access.wait_cache.count,count,,,,Number of segments waiting for loading access,0,milvus,,, +milvus.querynode.segment.access.wait_cache.duration.count,count,,,,Total time cost of waiting for loading access,0,milvus,,, +milvus.querynode.segment.access.wait_cache.global_duration.bucket,count,,,,Histogram bucket for the global time cost of waiting for loading access,0,milvus,,, +milvus.querynode.segment.access.wait_cache.global_duration.count,count,,,,Count of waiting
for loading access events,0,milvus,,, +milvus.querynode.segment.access.wait_cache.global_duration.sum,count,,millisecond,,Sum of the global time cost of waiting for loading access events,0,milvus,,, +milvus.querynode.segment.latency_per_vector.bucket,count,,,,Histogram bucket for one vector's search latency per segment,0,milvus,,, +milvus.querynode.segment.latency_per_vector.count,count,,,,Count aggregation of one vector's search latency per segment,0,milvus,,, +milvus.querynode.segment.latency_per_vector.sum,count,,millisecond,,Sum aggregation of one vector's search latency per segment,0,milvus,,, +milvus.querynode.segment.num,gauge,,,,"Number of segments loaded, clustered by its collection, partition, state and # of indexed fields",0,milvus,,, +milvus.querynode.sq.core_latency.bucket,count,,,,Histogram bucket for the latency of searches or queries in segcore,0,milvus,,, +milvus.querynode.sq.core_latency.count,count,,,,Count of search or query events in segcore,0,milvus,,, +milvus.querynode.sq.core_latency.sum,count,,millisecond,,Sum of latencies of searches or queries in segcore,0,milvus,,, +milvus.querynode.sq.queue.latency.bucket,count,,,,Histogram bucket for the latency of searches or queries in queue,0,milvus,,, +milvus.querynode.sq.queue.latency.count,count,,,,Count of search or query events,0,milvus,,, +milvus.querynode.sq.queue.latency.sum,count,,millisecond,,Sum of latencies of searches or queries in queue,0,milvus,,, +milvus.querynode.sq.queue.user_latency.bucket,count,,,,Histogram bucket for the latency per user of search or query in queue,0,milvus,,, +milvus.querynode.sq.queue.user_latency.count,count,,,,Count of search or query in queue,0,milvus,,, +milvus.querynode.sq.queue.user_latency.sum,count,,millisecond,,Sum of latencies per user of search or query in queue,0,milvus,,, +milvus.querynode.sq.reduce_latency.bucket,count,,,,Histogram bucket for the latencies of reduce search or query result,0,milvus,,, +milvus.querynode.sq.reduce_latency.count,count,,,,Count of reduce search or query result events,0,milvus,,, +milvus.querynode.sq.reduce_latency.sum,count,,millisecond,,Sum of latencies of reduce search or query result,0,milvus,,, +milvus.querynode.sq.req.count,count,,,,Count of search/query requests,0,milvus,,, +milvus.querynode.sq.req.latency.bucket,count,,,,Histogram bucket for the latency of search or query requests,0,milvus,,, +milvus.querynode.sq.req.latency.count,count,,,,Count of search or query requests,0,milvus,,, +milvus.querynode.sq.req.latency.sum,count,,millisecond,,Sum of latencies of search or query requests,0,milvus,,, +milvus.querynode.sq.segment_latency.bucket,count,,,,Histogram bucket for the latency of search or query per segment,0,milvus,,, +milvus.querynode.sq.segment_latency.count,count,,,,Count of search or query per segment,0,milvus,,, +milvus.querynode.sq.segment_latency.sum,count,,millisecond,,Sum of latencies of search or query per segment,0,milvus,,, +milvus.querynode.sq.wait_tsafe_latency.bucket,count,,,,Histogram bucket for the time that search or query waits for tsafe,0,milvus,,, +milvus.querynode.sq.wait_tsafe_latency.count,count,,,,Count of occurrences of search or query waiting for tsafe,0,milvus,,, +milvus.querynode.sq.wait_tsafe_latency.sum,count,,millisecond,,Sum of the time that search or query waits for tsafe,0,milvus,,, +milvus.querynode.wait_processing_msg,gauge,,,,Number of messages waiting to be processed,0,milvus,,, +milvus.querynode.watch_dml_channel_latency.bucket,count,,,,Histogram bucket for the latencies of watch data manipulation language
channel,0,milvus,,, +milvus.querynode.watch_dml_channel_latency.count,count,,,,Count of watch data manipulation language channel events,0,milvus,,, +milvus.querynode.watch_dml_channel_latency.sum,count,,millisecond,,Sum of the latencies of watch data manipulation language channel events,0,milvus,,, +milvus.queue.latency.bucket,count,,,,Histogram bucket for the queue latency per request,0,milvus,,, +milvus.queue.latency.count,count,,,,Count of requests with queue latency,0,milvus,,, +milvus.queue.latency.sum,count,,,,Sum of queue latencies,0,milvus,,, +milvus.range_search_latency.bucket,count,,,,Histogram bucket for range search latency (ms),0,milvus,,, +milvus.range_search_latency.count,count,,,,Count of range search operations,0,milvus,,, +milvus.range_search_latency.sum,count,,millisecond,,Sum of range search latencies (ms),0,milvus,,, +milvus.raw_compute_cnt.bucket,count,,,,Histogram bucket for the raw compute count per request,0,milvus,,, +milvus.raw_compute_cnt.count,count,,,,Count of requests that trigger raw compute operations,0,milvus,,, +milvus.raw_compute_cnt.sum,count,,,,Sum of raw compute operations,0,milvus,,, +milvus.re_search_cnt.bucket,count,,,,Histogram bucket for the number of fallback searches per request,0,milvus,,, +milvus.re_search_cnt.count,count,,,,Count of requests triggering fallback search operations,0,milvus,,, +milvus.re_search_cnt.sum,count,,,,Sum of fallback searches,0,milvus,,, +milvus.rootcoord.collection_num,gauge,,,,Number of collections,0,milvus,,, +milvus.rootcoord.credential_num,gauge,,,,Number of credentials,0,milvus,,, +milvus.rootcoord.ddl_req.count,count,,,,Count of DDL operations,0,milvus,,, +milvus.rootcoord.ddl_req.latency.bucket,count,,,,Histogram bucket for the latency of each DDL operation,0,milvus,,, +milvus.rootcoord.ddl_req.latency.count,count,,,,Count of DDL operations,0,milvus,,, +milvus.rootcoord.ddl_req.latency.sum,count,,millisecond,,Sum of the latencies of DDL operations,0,milvus,,, +milvus.rootcoord.ddl_req.latency_in_queue.bucket,count,,,,Histogram bucket for the latencies of each DDL operation in queue,0,milvus,,, +milvus.rootcoord.ddl_req.latency_in_queue.count,count,,,,Count of DDL operations in queue,0,milvus,,, +milvus.rootcoord.ddl_req.latency_in_queue.sum,count,,millisecond,,Sum of the latencies of DDL operations in queue,0,milvus,,, +milvus.rootcoord.dml_channel_num,gauge,,,,Number of DML channels,0,milvus,,, +milvus.rootcoord.entity_num,gauge,,,,"Number of entities, clustered by collection and their status (loaded/total)",0,milvus,,, +milvus.rootcoord.force_deny_writing_counter.count,count,,,,Number of times Milvus enters the force-deny-writing state,0,milvus,,, +milvus.rootcoord.id_alloc.count,count,,,,Count of IDs allocated,0,milvus,,, +milvus.rootcoord.indexed_entity_num,gauge,,,,"Indexed number of entities, clustered by collection, index name and whether it's a vector index",0,milvus,,, +milvus.rootcoord.msgstream_obj_num,gauge,,,,Number of message streams,0,milvus,,, +milvus.rootcoord.num_of_roles,gauge,,,,Number of roles,0,milvus,,, +milvus.rootcoord.partition_num,gauge,,,,Number of partitions,0,milvus,,, +milvus.rootcoord.produce_tt_lag_ms,gauge,,millisecond,,Now time minus time travel per physical channel,0,milvus,,, +milvus.rootcoord.proxy_num,gauge,,,,Number of proxy nodes managed by rootcoord,0,milvus,,, +milvus.rootcoord.qn_mem_high_water_level,gauge,,,,QueryNode memory high water level,0,milvus,,, +milvus.rootcoord.sync_timetick_latency.bucket,count,,,,Histogram bucket for the latencies of synchronizing
timetick message,0,milvus,,, +milvus.rootcoord.sync_timetick_latency.count,count,,,,Count of synchronizing timetick message events,0,milvus,,, +milvus.rootcoord.sync_timetick_latency.sum,count,,millisecond,,Sum of latencies of synchronizing timetick message events,0,milvus,,, +milvus.rootcoord.timestamp,gauge,,,,Latest timestamp allocated in memory,0,milvus,,, +milvus.rootcoord.timestamp_saved,gauge,,,,Timestamp saved in meta storage,0,milvus,,, +milvus.runtime_info,gauge,,,,Runtime information of Milvus,0,milvus,,, +milvus.search.latency.bucket,count,,,,Histogram bucket for search latencies (ms),0,milvus,,, +milvus.search.latency.count,count,,,,Count of search events,0,milvus,,, +milvus.search.latency.sum,count,,millisecond,,Sum of search latencies (ms),0,milvus,,, +milvus.search.topk.bucket,count,,,,Histogram bucket for search topk,0,milvus,,, +milvus.search.topk.count,count,,,,Count aggregation of search topk,0,milvus,,, +milvus.search.topk.sum,count,,,,Sum aggregation of search topk,0,milvus,,, +milvus.storage.kv_size,gauge,,,,Key-value size stats,0,milvus,,, +milvus.storage.op.count,count,,,,Count of persistent data operation,0,milvus,,, +milvus.storage.request_latency,gauge,,,,Request latency on the client side,0,milvus,,, diff --git a/milvus/pyproject.toml b/milvus/pyproject.toml new file mode 100644 index 0000000000000..7f94dbf91c0d2 --- /dev/null +++ b/milvus/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-milvus" +description = "The Milvus check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.11" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "milvus", +] +authors = [ + { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.11", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=32.6.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/milvus/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/milvus", +] +dev-mode-dirs = [ + ".", +] diff --git a/milvus/tests/__init__.py b/milvus/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/milvus/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/milvus/tests/common.py b/milvus/tests/common.py new file mode 100644 index 0000000000000..e2524182310f2 --- /dev/null +++ b/milvus/tests/common.py @@ -0,0 +1,416 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import os + +from datadog_checks.dev import get_docker_hostname, get_here + +HERE = get_here() +HOST = get_docker_hostname() +PORT = 9091 + + +def get_fixture_path(filename): + return os.path.join(HERE, 'fixtures', filename) + + +MOCKED_INSTANCE = { + "openmetrics_endpoint": f"http://{HOST}:{PORT}/metrics", + 'tags': ['test:tag'], +} + +COMPOSE_FILE = os.path.join(HERE, 'compose', 'docker-compose.yaml') + +STANDALONE_TEST_METRICS = { + 'milvus.go.gc_duration_seconds.quantile': 'gauge', + 'milvus.go.gc_duration_seconds.sum': 'monotonic_count', + 'milvus.go.gc_duration_seconds.count': 'monotonic_count', + 'milvus.go.goroutines': 'gauge', + 'milvus.go.info': 'gauge', + 'milvus.go.memstats.alloc_bytes': 'gauge', + 'milvus.go.memstats.alloc_bytes.count': 'monotonic_count', + 'milvus.go.memstats.buck_hash_sys_bytes': 'gauge', + 'milvus.go.memstats.frees.count': 'monotonic_count', + 'milvus.go.memstats.gc_sys_bytes': 'gauge', + 'milvus.go.memstats.heap.alloc_bytes': 'gauge', + 'milvus.go.memstats.heap.idle_bytes': 'gauge', + 'milvus.go.memstats.heap.inuse_bytes': 'gauge', + 'milvus.go.memstats.heap.objects': 'gauge', + 'milvus.go.memstats.heap.released_bytes': 'gauge', + 'milvus.go.memstats.heap.sys_bytes': 'gauge', + 'milvus.go.memstats.last_gc_time_seconds': 'gauge', + 'milvus.go.memstats.lookups.count': 'monotonic_count', + 'milvus.go.memstats.mallocs.count': 'monotonic_count', + 'milvus.go.memstats.mcache.inuse_bytes': 'gauge', + 'milvus.go.memstats.mcache.sys_bytes': 'gauge', + 'milvus.go.memstats.mspan.inuse_bytes': 'gauge', + 'milvus.go.memstats.mspan.sys_bytes': 'gauge', + 'milvus.go.memstats.next_gc_bytes': 'gauge', + 'milvus.go.memstats.other_sys_bytes': 'gauge', + 'milvus.go.memstats.stack.inuse_bytes': 'gauge', + 'milvus.go.memstats.stack.sys_bytes': 'gauge', + 'milvus.go.memstats.sys_bytes': 'gauge', + 'milvus.go.threads': 'gauge', + 'milvus.cgo.active_future_total': 'gauge', + 'milvus.cgo.cgo_duration_seconds.sum': 'monotonic_count', + 'milvus.cgo.cgo_duration_seconds.count': 'monotonic_count', + 'milvus.cgo.cgo_duration_seconds.bucket': 'monotonic_count', + 'milvus.cgo.cgo_queue_duration_seconds.sum': 'monotonic_count', + 'milvus.cgo.cgo_queue_duration_seconds.count': 'monotonic_count', + 'milvus.cgo.cgo_queue_duration_seconds.bucket': 'monotonic_count', + 'milvus.cgo.running_cgo_call_total': 'gauge', + 'milvus.datacoord.collection_num': 'gauge', + 'milvus.datacoord.consume_datanode_tt_lag_ms': 'gauge', + 'milvus.datacoord.datanode_num': 'gauge', + 'milvus.datacoord.import_tasks': 'gauge', + 'milvus.datacoord.index.task': 'gauge', + 'milvus.datacoord.index.node_num': 'gauge', + 'milvus.datacoord.index.req.count': 'monotonic_count', + 'milvus.datacoord.segment_num': 'gauge', + 'milvus.datacoord.task_execute_max_latency.sum': 'monotonic_count', + 'milvus.datacoord.task_execute_max_latency.count': 'monotonic_count', + 'milvus.datacoord.task_execute_max_latency.bucket': 'monotonic_count', + 'milvus.datanode.autoflush_buffer_op.count': 'monotonic_count', + 'milvus.datanode.consume.bytes.count': 'monotonic_count', + 'milvus.datanode.encode_buffer_latency.sum': 'monotonic_count', + 'milvus.datanode.encode_buffer_latency.count': 'monotonic_count', + 'milvus.datanode.encode_buffer_latency.bucket': 'monotonic_count', + 'milvus.datanode.flowgraph_num': 'gauge', + 'milvus.datanode.flush.buffer_op.count': 'monotonic_count', + 'milvus.datanode.flush.req.count': 'monotonic_count', + 
'milvus.datanode.flushed_data.rows.count': 'monotonic_count', + 'milvus.datanode.flushed_data.size.count': 'monotonic_count', + 'milvus.datanode.msg.rows.count': 'monotonic_count', + 'milvus.datanode.save_latency.sum': 'monotonic_count', + 'milvus.datanode.save_latency.count': 'monotonic_count', + 'milvus.datanode.save_latency.bucket': 'monotonic_count', + 'milvus.flushed_segment_file_num.sum': 'monotonic_count', + 'milvus.flushed_segment_file_num.count': 'monotonic_count', + 'milvus.flushed_segment_file_num.bucket': 'monotonic_count', + 'milvus.indexnode.build_index_latency.sum': 'monotonic_count', + 'milvus.indexnode.build_index_latency.count': 'monotonic_count', + 'milvus.indexnode.build_index_latency.bucket': 'monotonic_count', + 'milvus.indexnode.encode_index_latency.sum': 'monotonic_count', + 'milvus.indexnode.encode_index_latency.count': 'monotonic_count', + 'milvus.indexnode.encode_index_latency.bucket': 'monotonic_count', + 'milvus.indexnode.index.task.count': 'monotonic_count', + 'milvus.indexnode.index.task_latency_in_queue.count': 'monotonic_count', + 'milvus.indexnode.index.task_latency_in_queue.sum': 'monotonic_count', + 'milvus.indexnode.index.task_latency_in_queue.bucket': 'monotonic_count', + 'milvus.indexnode.knowhere_build_index_latency.sum': 'monotonic_count', + 'milvus.indexnode.knowhere_build_index_latency.count': 'monotonic_count', + 'milvus.indexnode.knowhere_build_index_latency.bucket': 'monotonic_count', + 'milvus.indexnode.save_index_latency.sum': 'monotonic_count', + 'milvus.indexnode.save_index_latency.count': 'monotonic_count', + 'milvus.indexnode.save_index_latency.bucket': 'monotonic_count', + 'milvus.meta.kv_size.sum': 'monotonic_count', + 'milvus.meta.kv_size.count': 'monotonic_count', + 'milvus.meta.kv_size.bucket': 'monotonic_count', + 'milvus.meta.op.count': 'monotonic_count', + 'milvus.meta.request_latency.sum': 'monotonic_count', + 'milvus.meta.request_latency.count': 'monotonic_count', + 'milvus.meta.request_latency.bucket': 'monotonic_count', + 'milvus.msg_queue_consumer_num': 'gauge', + 'milvus.msgstream.op.count': 'monotonic_count', + 'milvus.msgstream.request_latency.sum': 'monotonic_count', + 'milvus.msgstream.request_latency.count': 'monotonic_count', + 'milvus.msgstream.request_latency.bucket': 'monotonic_count', + 'milvus.num_node': 'gauge', + 'milvus.proxy.apply.pk_latency.sum': 'monotonic_count', + 'milvus.proxy.apply.pk_latency.count': 'monotonic_count', + 'milvus.proxy.apply.pk_latency.bucket': 'monotonic_count', + 'milvus.proxy.apply.timestamp_latency.sum': 'monotonic_count', + 'milvus.proxy.apply.timestamp_latency.count': 'monotonic_count', + 'milvus.proxy.apply.timestamp_latency.bucket': 'monotonic_count', + 'milvus.proxy.assign_segmentID_latency.sum': 'monotonic_count', + 'milvus.proxy.assign_segmentID_latency.count': 'monotonic_count', + 'milvus.proxy.assign_segmentID_latency.bucket': 'monotonic_count', + 'milvus.proxy.cache.hit.count': 'monotonic_count', + 'milvus.proxy.cache.update_latency.sum': 'monotonic_count', + 'milvus.proxy.cache.update_latency.count': 'monotonic_count', + 'milvus.proxy.cache.update_latency.bucket': 'monotonic_count', + 'milvus.proxy.delete_vectors.count': 'monotonic_count', + 'milvus.proxy.msgstream_obj_num': 'gauge', + 'milvus.proxy.mutation_send_latency.sum': 'monotonic_count', + 'milvus.proxy.mutation_send_latency.count': 'monotonic_count', + 'milvus.proxy.mutation_send_latency.bucket': 'monotonic_count', + 'milvus.proxy.rate_limit_req.count': 'monotonic_count', + 'milvus.proxy.report_value.count': 
'monotonic_count', + 'milvus.proxy.req.count': 'monotonic_count', + 'milvus.proxy.req.in_queue_latency.sum': 'monotonic_count', + 'milvus.proxy.req.in_queue_latency.count': 'monotonic_count', + 'milvus.proxy.req.in_queue_latency.bucket': 'monotonic_count', + 'milvus.proxy.req.latency.sum': 'monotonic_count', + 'milvus.proxy.req.latency.count': 'monotonic_count', + 'milvus.proxy.req.latency.bucket': 'monotonic_count', + 'milvus.proxy.send_bytes.count': 'monotonic_count', + 'milvus.proxy.sq.decode_result_latency.sum': 'monotonic_count', + 'milvus.proxy.sq.decode_result_latency.count': 'monotonic_count', + 'milvus.proxy.sq.decode_result_latency.bucket': 'monotonic_count', + 'milvus.proxy.sq.reduce_result_latency.sum': 'monotonic_count', + 'milvus.proxy.sq.reduce_result_latency.count': 'monotonic_count', + 'milvus.proxy.sq.reduce_result_latency.bucket': 'monotonic_count', + 'milvus.proxy.sq.wait_result_latency.sum': 'monotonic_count', + 'milvus.proxy.sq.wait_result_latency.count': 'monotonic_count', + 'milvus.proxy.sq.wait_result_latency.bucket': 'monotonic_count', + 'milvus.proxy.sync_segment_request_length.sum': 'monotonic_count', + 'milvus.proxy.sync_segment_request_length.count': 'monotonic_count', + 'milvus.proxy.sync_segment_request_length.bucket': 'monotonic_count', + 'milvus.proxy.tt_lag_ms': 'gauge', + 'milvus.querycoord.collection_num': 'gauge', + 'milvus.querycoord.load.latency.sum': 'monotonic_count', + 'milvus.querycoord.load.latency.count': 'monotonic_count', + 'milvus.querycoord.load.latency.bucket': 'monotonic_count', + 'milvus.querycoord.load.req.count': 'monotonic_count', + 'milvus.querycoord.partition_num': 'gauge', + 'milvus.querycoord.querynode_num': 'gauge', + 'milvus.querycoord.release.latency.sum': 'monotonic_count', + 'milvus.querycoord.release.latency.count': 'monotonic_count', + 'milvus.querycoord.release.latency.bucket': 'monotonic_count', + 'milvus.querycoord.release.req.count': 'monotonic_count', + 'milvus.querycoord_task_num': 'gauge', + 'milvus.querynode.apply_bf_latency.sum': 'monotonic_count', + 'milvus.querynode.apply_bf_latency.count': 'monotonic_count', + 'milvus.querynode.apply_bf_latency.bucket': 'monotonic_count', + 'milvus.querynode.collection_num': 'gauge', + 'milvus.querynode.consume.bytes_counter.count': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.bytes.count': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.duration.count': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.global_duration.sum': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.global_duration.count': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.global_duration.bucket': 'monotonic_count', + 'milvus.querynode.disk.cache.evict.count': 'monotonic_count', + 'milvus.querynode.disk.cache.load.bytes.count': 'monotonic_count', + 'milvus.querynode.disk.cache.load.duration.count': 'monotonic_count', + 'milvus.querynode.disk.cache.load.global_duration.sum': 'monotonic_count', + 'milvus.querynode.disk.cache.load.global_duration.count': 'monotonic_count', + 'milvus.querynode.disk.cache.load.global_duration.bucket': 'monotonic_count', + 'milvus.querynode.disk.cache.load.count': 'monotonic_count', + 'milvus.querynode.disk.used_size': 'gauge', + 'milvus.querynode.dml_vchannel_num': 'gauge', + 'milvus.querynode.execute_bytes_counter.count': 'monotonic_count', + 'milvus.querynode.flowgraph_num': 'gauge', + 'milvus.querynode.forward_delete_latency.sum': 'monotonic_count', + 'milvus.querynode.forward_delete_latency.count': 'monotonic_count', + 
'milvus.querynode.forward_delete_latency.bucket': 'monotonic_count', + 'milvus.querynode.load.index_latency.sum': 'monotonic_count', + 'milvus.querynode.load.index_latency.count': 'monotonic_count', + 'milvus.querynode.load.index_latency.bucket': 'monotonic_count', + 'milvus.querynode.load.segment.concurrency': 'gauge', + 'milvus.querynode.load.segment.latency.sum': 'monotonic_count', + 'milvus.querynode.load.segment.latency.count': 'monotonic_count', + 'milvus.querynode.load.segment.latency.bucket': 'monotonic_count', + 'milvus.querynode.process_insert_or_delete_latency.sum': 'monotonic_count', + 'milvus.querynode.process_insert_or_delete_latency.count': 'monotonic_count', + 'milvus.querynode.process_insert_or_delete_latency.bucket': 'monotonic_count', + 'milvus.querynode.read_task.concurrency': 'gauge', + 'milvus.querynode.read_task.ready_len': 'gauge', + 'milvus.querynode.read_task.unsolved_len': 'gauge', + 'milvus.querynode.search.group.nq.sum': 'monotonic_count', + 'milvus.querynode.search.group.nq.count': 'monotonic_count', + 'milvus.querynode.search.group.nq.bucket': 'monotonic_count', + 'milvus.querynode.search.group.size.sum': 'monotonic_count', + 'milvus.querynode.search.group.size.count': 'monotonic_count', + 'milvus.querynode.search.group.size.bucket': 'monotonic_count', + 'milvus.querynode.search.group.topk.sum': 'monotonic_count', + 'milvus.querynode.search.group.topk.count': 'monotonic_count', + 'milvus.querynode.search.group.topk.bucket': 'monotonic_count', + 'milvus.querynode.search.nq.sum': 'monotonic_count', + 'milvus.querynode.search.nq.count': 'monotonic_count', + 'milvus.querynode.search.nq.bucket': 'monotonic_count', + 'milvus.querynode.search.topk.sum': 'monotonic_count', + 'milvus.querynode.search.topk.count': 'monotonic_count', + 'milvus.querynode.search.topk.bucket': 'monotonic_count', + 'milvus.querynode.segment.access.duration.count': 'monotonic_count', + 'milvus.querynode.segment.access.global_duration.sum': 'monotonic_count', + 'milvus.querynode.segment.access.global_duration.count': 'monotonic_count', + 'milvus.querynode.segment.access.global_duration.bucket': 'monotonic_count', + 'milvus.querynode.segment.access.count': 'monotonic_count', + 'milvus.querynode.segment.access.wait_cache.duration.count': 'monotonic_count', + 'milvus.querynode.segment.access.wait_cache.global_duration.sum': 'monotonic_count', + 'milvus.querynode.segment.access.wait_cache.global_duration.count': 'monotonic_count', + 'milvus.querynode.segment.access.wait_cache.global_duration.bucket': 'monotonic_count', + 'milvus.querynode.segment.access.wait_cache.count': 'monotonic_count', + 'milvus.querynode.segment.latency_per_vector.sum': 'monotonic_count', + 'milvus.querynode.segment.latency_per_vector.count': 'monotonic_count', + 'milvus.querynode.segment.latency_per_vector.bucket': 'monotonic_count', + 'milvus.querynode.sq.core_latency.sum': 'monotonic_count', + 'milvus.querynode.sq.core_latency.count': 'monotonic_count', + 'milvus.querynode.sq.core_latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.queue.latency.sum': 'monotonic_count', + 'milvus.querynode.sq.queue.latency.count': 'monotonic_count', + 'milvus.querynode.sq.queue.latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.queue.user_latency.sum': 'monotonic_count', + 'milvus.querynode.sq.queue.user_latency.count': 'monotonic_count', + 'milvus.querynode.sq.queue.user_latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.reduce_latency.sum': 'monotonic_count', + 'milvus.querynode.sq.reduce_latency.count': 
'monotonic_count', + 'milvus.querynode.sq.reduce_latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.req.latency.sum': 'monotonic_count', + 'milvus.querynode.sq.req.latency.count': 'monotonic_count', + 'milvus.querynode.sq.req.latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.segment_latency.sum': 'monotonic_count', + 'milvus.querynode.sq.segment_latency.count': 'monotonic_count', + 'milvus.querynode.sq.segment_latency.bucket': 'monotonic_count', + 'milvus.querynode.sq.wait_tsafe_latency.sum': 'monotonic_count', + 'milvus.querynode.sq.wait_tsafe_latency.count': 'monotonic_count', + 'milvus.querynode.sq.wait_tsafe_latency.bucket': 'monotonic_count', + 'milvus.querynode.wait_processing_msg': 'gauge', + 'milvus.querynode.watch_dml_channel_latency.sum': 'monotonic_count', + 'milvus.querynode.watch_dml_channel_latency.count': 'monotonic_count', + 'milvus.querynode.watch_dml_channel_latency.bucket': 'monotonic_count', + 'milvus.rootcoord.collection_num': 'gauge', + 'milvus.rootcoord.credential_num': 'gauge', + 'milvus.rootcoord.ddl_req.count': 'monotonic_count', + 'milvus.rootcoord.sync_timetick_latency.sum': 'monotonic_count', + 'milvus.rootcoord.sync_timetick_latency.bucket': 'monotonic_count', + 'milvus.rootcoord.sync_timetick_latency.count': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency.sum': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency.count': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency.bucket': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency_in_queue.sum': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency_in_queue.count': 'monotonic_count', + 'milvus.rootcoord.ddl_req.latency_in_queue.bucket': 'monotonic_count', + 'milvus.rootcoord.dml_channel_num': 'gauge', + 'milvus.rootcoord.entity_num': 'gauge', + 'milvus.rootcoord.force_deny_writing_counter.count': 'monotonic_count', + 'milvus.rootcoord.id_alloc.count': 'monotonic_count', + 'milvus.rootcoord.indexed_entity_num': 'gauge', + 'milvus.rootcoord.msgstream_obj_num': 'gauge', + 'milvus.rootcoord.num_of_roles': 'gauge', + 'milvus.rootcoord.partition_num': 'gauge', + 'milvus.rootcoord.produce_tt_lag_ms': 'gauge', + 'milvus.rootcoord.proxy_num': 'gauge', + 'milvus.rootcoord.qn_mem_high_water_level': 'gauge', + 'milvus.rootcoord.timestamp': 'gauge', + 'milvus.rootcoord.timestamp_saved': 'gauge', + 'milvus.runtime_info': 'gauge', + 'milvus.process.cpu_seconds.count': 'monotonic_count', + 'milvus.process.max_fds': 'gauge', + 'milvus.process.open_fds': 'gauge', + 'milvus.process.resident_memory_bytes': 'gauge', + 'milvus.process.start_time_seconds': 'gauge', + 'milvus.process.virtual_memory.bytes': 'gauge', + 'milvus.process.virtual_memory.max_bytes': 'gauge', + 'milvus.bf_search_cnt.sum': 'monotonic_count', + 'milvus.bf_search_cnt.count': 'monotonic_count', + 'milvus.bf_search_cnt.bucket': 'monotonic_count', + 'milvus.bitset_ratio.sum': 'monotonic_count', + 'milvus.bitset_ratio.count': 'monotonic_count', + 'milvus.bitset_ratio.bucket': 'monotonic_count', + 'milvus.build_latency.sum': 'monotonic_count', + 'milvus.build_latency.count': 'monotonic_count', + 'milvus.build_latency.bucket': 'monotonic_count', + 'milvus.cache_hit_cnt.sum': 'monotonic_count', + 'milvus.cache_hit_cnt.count': 'monotonic_count', + 'milvus.cache_hit_cnt.bucket': 'monotonic_count', + 'milvus.diskann_bitset_ratio.sum': 'monotonic_count', + 'milvus.diskann_bitset_ratio.count': 'monotonic_count', + 'milvus.diskann_bitset_ratio.bucket': 'monotonic_count', + 'milvus.diskann.range_search_iters.sum': 'monotonic_count', + 
'milvus.diskann.range_search_iters.count': 'monotonic_count', + 'milvus.diskann.range_search_iters.bucket': 'monotonic_count', + 'milvus.diskann.search_hops.sum': 'monotonic_count', + 'milvus.diskann.search_hops.count': 'monotonic_count', + 'milvus.diskann.search_hops.bucket': 'monotonic_count', + 'milvus.exec_latency.sum': 'monotonic_count', + 'milvus.exec_latency.count': 'monotonic_count', + 'milvus.exec_latency.bucket': 'monotonic_count', + 'milvus.filter.connectivity_ratio.sum': 'monotonic_count', + 'milvus.filter.connectivity_ratio.count': 'monotonic_count', + 'milvus.filter.connectivity_ratio.bucket': 'monotonic_count', + 'milvus.filter.mv.activated_fields_cnt.sum': 'monotonic_count', + 'milvus.filter.mv.activated_fields_cnt.count': 'monotonic_count', + 'milvus.filter.mv.activated_fields_cnt.bucket': 'monotonic_count', + 'milvus.filter.mv.change_base_cnt.sum': 'monotonic_count', + 'milvus.filter.mv.change_base_cnt.count': 'monotonic_count', + 'milvus.filter.mv.change_base_cnt.bucket': 'monotonic_count', + 'milvus.filter.mv.only_cnt.sum': 'monotonic_count', + 'milvus.filter.mv.only_cnt.count': 'monotonic_count', + 'milvus.filter.mv.only_cnt.bucket': 'monotonic_count', + 'milvus.filter.mv.supplement_ep_bool_cnt.sum': 'monotonic_count', + 'milvus.filter.mv.supplement_ep_bool_cnt.count': 'monotonic_count', + 'milvus.filter.mv.supplement_ep_bool_cnt.bucket': 'monotonic_count', + 'milvus.graph_search_cnt.sum': 'monotonic_count', + 'milvus.graph_search_cnt.count': 'monotonic_count', + 'milvus.graph_search_cnt.bucket': 'monotonic_count', + 'milvus.hnsw.bitset_ratio.sum': 'monotonic_count', + 'milvus.hnsw.bitset_ratio.count': 'monotonic_count', + 'milvus.hnsw.bitset_ratio.bucket': 'monotonic_count', + 'milvus.hnsw.search_hops.sum': 'monotonic_count', + 'milvus.hnsw.search_hops.count': 'monotonic_count', + 'milvus.hnsw.search_hops.bucket': 'monotonic_count', + 'milvus.internal.core_search_latency.sum': 'monotonic_count', + 'milvus.internal.core_search_latency.count': 'monotonic_count', + 'milvus.internal.core_search_latency.bucket': 'monotonic_count', + 'milvus.internal.mmap.allocated_space_bytes.sum': 'monotonic_count', + 'milvus.internal.mmap.allocated_space_bytes.count': 'monotonic_count', + 'milvus.internal.mmap.allocated_space_bytes.bucket': 'monotonic_count', + 'milvus.internal.mmap.in_used_space_bytes': 'gauge', + 'milvus.internal.storage.kv_size.sum': 'monotonic_count', + 'milvus.internal.storage.kv_size.count': 'monotonic_count', + 'milvus.internal.storage.kv_size.bucket': 'monotonic_count', + 'milvus.internal.storage.load_duration.sum': 'monotonic_count', + 'milvus.internal.storage.load_duration.count': 'monotonic_count', + 'milvus.internal.storage.load_duration.bucket': 'monotonic_count', + 'milvus.internal.storage.op.count': 'monotonic_count', + 'milvus.internal.storage.request_latency.sum': 'monotonic_count', + 'milvus.internal.storage.request_latency.count': 'monotonic_count', + 'milvus.internal.storage.request_latency.bucket': 'monotonic_count', + 'milvus.io_cnt.sum': 'monotonic_count', + 'milvus.io_cnt.count': 'monotonic_count', + 'milvus.io_cnt.bucket': 'monotonic_count', + 'milvus.ivf_search_cnt.sum': 'monotonic_count', + 'milvus.ivf_search_cnt.count': 'monotonic_count', + 'milvus.ivf_search_cnt.bucket': 'monotonic_count', + 'milvus.load_latency.sum': 'monotonic_count', + 'milvus.load_latency.count': 'monotonic_count', + 'milvus.load_latency.bucket': 'monotonic_count', + 'milvus.quant.compute_cnt.sum': 'monotonic_count', + 'milvus.quant.compute_cnt.count': 'monotonic_count', 
+ 'milvus.quant.compute_cnt.bucket': 'monotonic_count', + 'milvus.queue.latency.sum': 'monotonic_count', + 'milvus.queue.latency.count': 'monotonic_count', + 'milvus.queue.latency.bucket': 'monotonic_count', + 'milvus.range_search_latency.sum': 'monotonic_count', + 'milvus.range_search_latency.count': 'monotonic_count', + 'milvus.range_search_latency.bucket': 'monotonic_count', + 'milvus.raw_compute_cnt.sum': 'monotonic_count', + 'milvus.raw_compute_cnt.count': 'monotonic_count', + 'milvus.raw_compute_cnt.bucket': 'monotonic_count', + 'milvus.re_search_cnt.sum': 'monotonic_count', + 'milvus.re_search_cnt.count': 'monotonic_count', + 'milvus.re_search_cnt.bucket': 'monotonic_count', + 'milvus.search.latency.sum': 'monotonic_count', + 'milvus.search.latency.count': 'monotonic_count', + 'milvus.search.latency.bucket': 'monotonic_count', + 'milvus.search.topk.sum': 'monotonic_count', + 'milvus.search.topk.count': 'monotonic_count', + 'milvus.search.topk.bucket': 'monotonic_count', +} + +OTHER_TEST_METRICS = { + 'milvus.datacoord.channel_checkpoint_unix_seconds': 'gauge', + 'milvus.datacoord.stored.binlog_size': 'gauge', + 'milvus.datacoord.stored.index_files_size': 'gauge', + 'milvus.datacoord.stored.rows_num': 'gauge', + 'milvus.datacoord.watched_dml_chanel_num': 'gauge', + 'milvus.datanode.consume.msg_count': 'count', + 'milvus.datanode.consume.tt_lag_ms': 'gauge', + 'milvus.datanode.msg.dispatcher_tt_lag_ms': 'gauge', + 'milvus.querycoord.current_target_checkpoint_unix_seconds': 'gauge', + 'milvus.querynode.consume.msg_count': 'count', + 'milvus.querynode.consume.tt_lag_ms': 'gauge', + 'milvus.querynode.entity.num': 'gauge', + 'milvus.querynode.entity.size': 'gauge', + 'milvus.querynode.msg_dispatcher_tt_lag_ms': 'gauge', + 'milvus.querynode.partition_num': 'gauge', + 'milvus.querynode.segment.num': 'gauge', + 'milvus.querynode.sq.req.count': 'monotonic_count', + 'milvus.rootcoord.sync_timetick_latency.bucket': 'count', + 'milvus.rootcoord.sync_timetick_latency.count': 'count', + 'milvus.rootcoord.timestamp': 'gauge', + 'milvus.storage.kv_size': 'gauge', + 'milvus.storage.op_count': 'count', + 'milvus.storage.request_latency': 'gauge', +} diff --git a/milvus/tests/compose/Dockerfile b/milvus/tests/compose/Dockerfile new file mode 100644 index 0000000000000..9ec85e75bb888 --- /dev/null +++ b/milvus/tests/compose/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.9-slim + +# Set working directory +WORKDIR /app + +# Copy requirements file and install dependencies +COPY requirements.txt /app/requirements.txt +RUN pip install --no-cache-dir -r requirements.txt + +COPY script.py /app/script.py diff --git a/milvus/tests/compose/docker-compose.yaml b/milvus/tests/compose/docker-compose.yaml new file mode 100644 index 0000000000000..d6a4e03eeaf0d --- /dev/null +++ b/milvus/tests/compose/docker-compose.yaml @@ -0,0 +1,41 @@ +services: + milvus: + image: milvusdb/milvus:v2.5.0 + security_opt: + - seccomp:unconfined + environment: + - ETCD_USE_EMBED=true + - ETCD_DATA_DIR=/var/lib/milvus/etcd + - ETCD_CONFIG_PATH=/milvus/configs/embedEtcd.yaml + - COMMON_STORAGETYPE=local + volumes: + - /tmp/volumes/milvus:/var/lib/milvus + - ./embedEtcd.yaml:/milvus/configs/embedEtcd.yaml + - ./user.yaml:/milvus/configs/user.yaml + ports: + - "19530:19530" + - "9091:9091" + - "2379:2379" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"] + interval: 30s + timeout: 20s + start_period: 90s + retries: 3 + command: milvus run standalone + + script-runner: + build: + context: . 
+ dockerfile: Dockerfile + container_name: script-runner + depends_on: + milvus: + condition: service_healthy + volumes: + - ./script.py:/app/script.py + working_dir: /app + command: ["python", "script.py"] + environment: + - MILVUS_HOST=milvus + - MILVUS_PORT=19530 diff --git a/milvus/tests/compose/embedEtcd.yaml b/milvus/tests/compose/embedEtcd.yaml new file mode 100644 index 0000000000000..32954faa8b1fc --- /dev/null +++ b/milvus/tests/compose/embedEtcd.yaml @@ -0,0 +1,5 @@ +listen-client-urls: http://0.0.0.0:2379 +advertise-client-urls: http://0.0.0.0:2379 +quota-backend-bytes: 4294967296 +auto-compaction-mode: revision +auto-compaction-retention: '1000' diff --git a/milvus/tests/compose/requirements.txt b/milvus/tests/compose/requirements.txt new file mode 100644 index 0000000000000..f96eefaacf1ea --- /dev/null +++ b/milvus/tests/compose/requirements.txt @@ -0,0 +1,2 @@ +numpy==1.26.4 +pymilvus==2.5.1 diff --git a/milvus/tests/compose/script.py b/milvus/tests/compose/script.py new file mode 100644 index 0000000000000..c525220925a11 --- /dev/null +++ b/milvus/tests/compose/script.py @@ -0,0 +1,151 @@ +# hello_milvus.py demonstrates the basic operations of PyMilvus, a Python SDK of Milvus. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search, query, and hybrid search on entities +# 6. delete entities by PK +# 7. drop collection +import os +import time + +import numpy as np +from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections, utility + +num_entities, dim = 3000, 8 + +# Retrieve host and port from environment variables +MILVUS_HOST = os.getenv("MILVUS_HOST", "localhost") +MILVUS_PORT = os.getenv("MILVUS_PORT", "19530") + +################################################################################# +# 1. connect to Milvus +# Add a new connection alias `default` for Milvus server +connections.connect("default", host=MILVUS_HOST, port=MILVUS_PORT) + +has = utility.has_collection("hello_milvus") + +################################################################################# +# 2. create collection +# We're going to create a collection with 3 fields. +# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "pk" | VarChar | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "random" | Double | | "a double field" | +# +-+------------+------------+------------------+------------------------------+ +# |3|"embeddings"| FloatVector| dim=8 | "float vector with dim 8" | +# +-+------------+------------+------------------+------------------------------+ +fields = [ + FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=False, max_length=100), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim), +] + +schema = CollectionSchema(fields, "hello_milvus is the simplest demo to introduce the APIs") + +hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong") + +################################################################################ +# 3. insert data +# We are going to insert 3000 rows of data into `hello_milvus` +# Data to be inserted must be organized in fields. 
+# +# The insert() method returns: +# - either automatically generated primary keys by Milvus if auto_id=True in the schema; +# - or the existing primary key field from the entities if auto_id=False in the schema. + +rng = np.random.default_rng(seed=19530) +entities = [ + # provide the pk field because `auto_id` is set to False + [str(i) for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + rng.random((num_entities, dim), np.float32), # field embeddings, supports numpy.ndarray and list +] + +insert_result = hello_milvus.insert(entities) + +row = {"pk": "19530", "random": 0.5, "embeddings": rng.random((1, dim), np.float32)[0]} +hello_milvus.insert(row) + +hello_milvus.flush() + +################################################################################ +# 4. create index +# We are going to create an IVF_FLAT index for hello_milvus collection. +# create_index() can only be applied to `FloatVector` and `BinaryVector` fields. +index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, +} + +hello_milvus.create_index("embeddings", index) + +################################################################################ +# 5. search, query, and hybrid search +# After data has been inserted into Milvus and indexed, you can perform: +# - search based on vector similarity +# - query based on scalar filtering (boolean, int, etc.) +# - hybrid search based on vector similarity and scalar filtering. +# + +# Before conducting a search or a query, you need to load the data in `hello_milvus` into memory. +hello_milvus.load() + +# ----------------------------------------------------------------------------- +# search based on vector similarity +vectors_to_search = entities[-1][-2:] +search_params = { + "metric_type": "L2", + "params": {"nprobe": 10}, +} + +start_time = time.time() +result = hello_milvus.search(vectors_to_search, "embeddings", search_params, limit=3, output_fields=["random"]) +end_time = time.time() + +# ----------------------------------------------------------------------------- +# query based on scalar filtering (boolean, int, etc.) + +start_time = time.time() +result = hello_milvus.query(expr="random > 0.5", output_fields=["random", "embeddings"]) +end_time = time.time() + +# ----------------------------------------------------------------------------- +# pagination +r1 = hello_milvus.query(expr="random > 0.5", limit=4, output_fields=["random"]) +r2 = hello_milvus.query(expr="random > 0.5", offset=1, limit=3, output_fields=["random"]) + + +# ----------------------------------------------------------------------------- +# hybrid search + +start_time = time.time() +result = hello_milvus.search( + vectors_to_search, "embeddings", search_params, limit=3, expr="random > 0.5", output_fields=["random"] +) +end_time = time.time() + +############################################################################### +# 6. delete entities by PK +# You can delete entities by their PK values using boolean expressions. +ids = insert_result.primary_keys + +expr = f'pk in ["{ids[0]}" , "{ids[1]}"]' + +result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"]) + +hello_milvus.delete(expr) + +result = hello_milvus.query(expr=expr, output_fields=["random", "embeddings"]) + + +############################################################################### +# 7.
###############################################################################
+# 7. drop collection
+# Finally, drop the hello_milvus collection
+utility.drop_collection("hello_milvus")
+
+print("Setup complete")
diff --git a/milvus/tests/compose/user.yaml b/milvus/tests/compose/user.yaml
new file mode 100644
index 0000000000000..8d312694bb2c8
--- /dev/null
+++ b/milvus/tests/compose/user.yaml
@@ -0,0 +1 @@
+# Extra config to override default milvus.yaml
diff --git a/milvus/tests/conftest.py b/milvus/tests/conftest.py
new file mode 100644
index 0000000000000..ab0a5dc8b52a7
--- /dev/null
+++ b/milvus/tests/conftest.py
@@ -0,0 +1,33 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+import copy
+import logging
+
+import pytest
+
+from datadog_checks.dev import docker_run
+from datadog_checks.dev.conditions import CheckDockerLogs, CheckEndpoints
+
+from . import common
+
+SCRIPT_COMPLETION_STR = "Setup complete"
+
+
+@pytest.fixture(scope='session')
+def dd_environment():
+    compose_file = common.COMPOSE_FILE
+    conditions = [
+        CheckEndpoints(common.MOCKED_INSTANCE["openmetrics_endpoint"]),
+        CheckDockerLogs("script-runner", SCRIPT_COMPLETION_STR),
+    ]
+    logging.info(conditions)
+    with docker_run(compose_file, conditions=conditions):
+        yield {
+            'instances': [common.MOCKED_INSTANCE],
+        }
+
+
+@pytest.fixture
+def instance():
+    return copy.deepcopy(common.MOCKED_INSTANCE)
diff --git a/milvus/tests/fixtures/milvus_payload.txt b/milvus/tests/fixtures/milvus_payload.txt
new file mode 100644
index 0000000000000..77895e0a21a2d
--- /dev/null
+++ b/milvus/tests/fixtures/milvus_payload.txt
@@ -0,0 +1,4138 @@
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 2.2583e-05
+go_gc_duration_seconds{quantile="0.25"} 5.5624e-05
+go_gc_duration_seconds{quantile="0.5"} 7e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000267583
+go_gc_duration_seconds{quantile="1"} 0.000976792
+go_gc_duration_seconds_sum 0.003638707
+go_gc_duration_seconds_count 18
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 385
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.22.0"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 8.1514072e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 6.38517176e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.803289e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 2.890576e+06
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 5.005784e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.1514072e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 1.70811392e+08
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 9.3151232e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 689980 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 1.550336e+08 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.63962624e+08 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.7349596816399348e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 3.580556e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 12000 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 1.24096e+06 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 1.51776e+06 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.33652608e+08 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.994135e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 4.42368e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 4.42368e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 2.78722872e+08 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 21 +# HELP milvus_build_info Build information of milvus +# TYPE milvus_build_info gauge +milvus_build_info{built="Fri Dec 20 13:51:45 UTC 2024",git_commit="7fe2cd0",version="v2.5.0"} 1 +# HELP milvus_cgo_active_future_total Total number of active futures. +# TYPE milvus_cgo_active_future_total gauge +milvus_cgo_active_future_total{node_id="37"} 0 +# HELP milvus_cgo_cgo_duration_seconds Histogram of cgo call duration in seconds. 
+# TYPE milvus_cgo_cgo_duration_seconds histogram +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="1.0000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="2.5000000000000004e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="5.000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="1e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="9.999999999999999e-06"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="1.9999999999999998e-05"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="4.9999999999999996e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="9.999999999999999e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="0.00025"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="0.0005"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="0.001"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="0.002"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="0.01"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_destroy",node_id="37",le="+Inf"} 7 +milvus_cgo_cgo_duration_seconds_sum{name="future_destroy",node_id="37"} 0.000116711 +milvus_cgo_cgo_duration_seconds_count{name="future_destroy",node_id="37"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="1.0000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="2.5000000000000004e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="5.000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="1e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="9.999999999999999e-06"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="1.9999999999999998e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="4.9999999999999996e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="9.999999999999999e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="0.00025"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="0.0005"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="0.001"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="0.002"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="0.01"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_go_register_ready_callback",node_id="37",le="+Inf"} 7 +milvus_cgo_cgo_duration_seconds_sum{name="future_go_register_ready_callback",node_id="37"} 6.487499999999999e-05 
+milvus_cgo_cgo_duration_seconds_count{name="future_go_register_ready_callback",node_id="37"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="1.0000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="2.5000000000000004e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="5.000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="1e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="9.999999999999999e-06"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="1.9999999999999998e-05"} 6 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="4.9999999999999996e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="9.999999999999999e-05"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="0.00025"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="0.0005"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="0.001"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="0.002"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="0.01"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="future_leak_and_get",node_id="37",le="+Inf"} 7 +milvus_cgo_cgo_duration_seconds_sum{name="future_leak_and_get",node_id="37"} 9.270699999999999e-05 +milvus_cgo_cgo_duration_seconds_count{name="future_leak_and_get",node_id="37"} 7 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="1.0000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="2.5000000000000004e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="5.000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="1e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="9.999999999999999e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="1.9999999999999998e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="4.9999999999999996e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="9.999999999999999e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="0.00025"} 1 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="0.0005"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="0.001"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="0.002"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="0.01"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="retrieve",node_id="37",le="+Inf"} 5 +milvus_cgo_cgo_duration_seconds_sum{name="retrieve",node_id="37"} 0.0015534569999999998 +milvus_cgo_cgo_duration_seconds_count{name="retrieve",node_id="37"} 5 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="1.0000000000000001e-07"} 0 
+milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="2.5000000000000004e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="5.000000000000001e-07"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="1e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="9.999999999999999e-06"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="1.9999999999999998e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="4.9999999999999996e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="9.999999999999999e-05"} 0 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="0.00025"} 2 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="0.0005"} 2 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="0.001"} 2 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="0.002"} 2 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="0.01"} 2 +milvus_cgo_cgo_duration_seconds_bucket{name="search",node_id="37",le="+Inf"} 2 +milvus_cgo_cgo_duration_seconds_sum{name="search",node_id="37"} 0.000372251 +milvus_cgo_cgo_duration_seconds_count{name="search",node_id="37"} 2 +# HELP milvus_cgo_cgo_queue_duration_seconds Duration of cgo call in queue. +# TYPE milvus_cgo_cgo_queue_duration_seconds histogram +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="1e-08"} 0 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="1.0000000000000001e-07"} 1 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="2.5000000000000004e-07"} 13 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="5.000000000000001e-07"} 18 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="1e-06"} 22 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="9.999999999999999e-06"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="1.9999999999999998e-05"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="4.9999999999999996e-05"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="9.999999999999999e-05"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="0.00025"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="0.0005"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="0.001"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="0.002"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="0.01"} 28 +milvus_cgo_cgo_queue_duration_seconds_bucket{node_id="37",le="+Inf"} 28 +milvus_cgo_cgo_queue_duration_seconds_sum{node_id="37"} 1.6873e-05 +milvus_cgo_cgo_queue_duration_seconds_count{node_id="37"} 28 +# HELP milvus_cgo_running_cgo_call_total Total number of running cgo calls. 
+# TYPE milvus_cgo_running_cgo_call_total gauge +milvus_cgo_running_cgo_call_total{node_id="37"} 0 +# HELP milvus_datacoord_collection_num number of collections +# TYPE milvus_datacoord_collection_num gauge +milvus_datacoord_collection_num 0 +# HELP milvus_datacoord_consume_datanode_tt_lag_ms now time minus tt per physical channel +# TYPE milvus_datacoord_consume_datanode_tt_lag_ms gauge +milvus_datacoord_consume_datanode_tt_lag_ms{channel_name="by-dev-rootcoord-dml_10",node_id="37"} 487 +milvus_datacoord_consume_datanode_tt_lag_ms{channel_name="by-dev-rootcoord-dml_10_454809251903439155v0",node_id="37"} 487 +# HELP milvus_datacoord_datanode_num number of data nodes +# TYPE milvus_datacoord_datanode_num gauge +milvus_datacoord_datanode_num 1 +# HELP milvus_datacoord_import_tasks the import tasks grouping by type and state +# TYPE milvus_datacoord_import_tasks gauge +milvus_datacoord_import_tasks{import_state="Completed",task_type="ImportTask"} 0 +milvus_datacoord_import_tasks{import_state="Completed",task_type="PreImportTask"} 0 +milvus_datacoord_import_tasks{import_state="Failed",task_type="ImportTask"} 0 +milvus_datacoord_import_tasks{import_state="Failed",task_type="PreImportTask"} 0 +milvus_datacoord_import_tasks{import_state="InProgress",task_type="ImportTask"} 0 +milvus_datacoord_import_tasks{import_state="InProgress",task_type="PreImportTask"} 0 +milvus_datacoord_import_tasks{import_state="Pending",task_type="ImportTask"} 0 +milvus_datacoord_import_tasks{import_state="Pending",task_type="PreImportTask"} 0 +# HELP milvus_datacoord_index_node_num number of IndexNodes managed by IndexCoord +# TYPE milvus_datacoord_index_node_num gauge +milvus_datacoord_index_node_num 1 +# HELP milvus_datacoord_index_req_count number of building index requests +# TYPE milvus_datacoord_index_req_count counter +milvus_datacoord_index_req_count{status="success"} 1 +milvus_datacoord_index_req_count{status="total"} 1 +# HELP milvus_datacoord_index_task_count number of index tasks of each type +# TYPE milvus_datacoord_index_task_count gauge +milvus_datacoord_index_task_count{collection_id="454808401168564379",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808401168564379",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808401168564379",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808401168564379",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808438016835724",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808438016835724",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808438016835724",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808438016835724",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808487770194226",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808487770194226",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808487770194226",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808487770194226",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808550493389106",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808550493389106",index_task_status="finished"} 1 
+milvus_datacoord_index_task_count{collection_id="454808550493389106",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808550493389106",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808644260200606",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808644260200606",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808644260200606",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808644260200606",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808703961661599",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808703961661599",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808703961661599",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808703961661599",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808957805920562",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808957805920562",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808957805920562",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808957805920562",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454808985355419955",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454808985355419955",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454808985355419955",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454808985355419955",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454809016083415346",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454809016083415346",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454809016083415346",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454809016083415346",index_task_status="unissued"} 0 +milvus_datacoord_index_task_count{collection_id="454809046208741680",index_task_status="failed"} 0 +milvus_datacoord_index_task_count{collection_id="454809046208741680",index_task_status="finished"} 1 +milvus_datacoord_index_task_count{collection_id="454809046208741680",index_task_status="in-progress"} 0 +milvus_datacoord_index_task_count{collection_id="454809046208741680",index_task_status="unissued"} 0 +# HELP milvus_datacoord_segment_num number of segments +# TYPE milvus_datacoord_segment_num gauge +milvus_datacoord_segment_num{segment_is_sorted="sorted",segment_level="L1",segment_state="Dropped"} 11 +milvus_datacoord_segment_num{segment_is_sorted="sorted",segment_level="L1",segment_state="Flushed"} 0 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L0",segment_state="Dropped"} 11 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L0",segment_state="Flushed"} 0 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L1",segment_state="Dropped"} 11 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L1",segment_state="Flushed"} 0 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L1",segment_state="Flushing"} 0 
+milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L1",segment_state="Growing"} 0 +milvus_datacoord_segment_num{segment_is_sorted="unsorted",segment_level="L1",segment_state="Sealed"} 0 +# HELP milvus_datacoord_task_count number of index tasks of each type +# TYPE milvus_datacoord_task_count gauge +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808401168564379",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808438016835724",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808487770194226",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808550493389106",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 
+milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808644260200606",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808703961661599",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808957805920562",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454808985355419955",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809016083415346",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 
+milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809046208741680",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateFailed",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateFinished",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateInProgress",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateInit",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateNone",task_type="JobTypeStatsJob"} 0 +milvus_datacoord_task_count{collection_id="454809251903439155",task_state="JobStateRetry",task_type="JobTypeStatsJob"} 0 +# HELP milvus_datacoord_task_execute_max_latency latency of task execute operation +# TYPE milvus_datacoord_task_execute_max_latency histogram +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="1"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="100"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="500"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="1000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="5000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="10000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="20000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="50000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="100000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="250000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="500000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="1e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="3.6e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="5e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="1e+07"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeIndexJob",le="+Inf"} 1 +milvus_datacoord_task_execute_max_latency_sum{status="executing",task_type="JobTypeIndexJob"} 971 
+milvus_datacoord_task_execute_max_latency_count{status="executing",task_type="JobTypeIndexJob"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="1"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="100"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="500"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="1000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="5000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="10000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="20000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="50000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="100000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="250000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="500000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="1e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="3.6e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="5e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="1e+07"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="executing",task_type="JobTypeStatsJob",le="+Inf"} 1 +milvus_datacoord_task_execute_max_latency_sum{status="executing",task_type="JobTypeStatsJob"} 533 +milvus_datacoord_task_execute_max_latency_count{status="executing",task_type="JobTypeStatsJob"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="1"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="100"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="500"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="1000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="5000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="10000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="20000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="50000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="100000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="250000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="500000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="1e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="3.6e+06"} 1 
+milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="5e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="1e+07"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeIndexJob",le="+Inf"} 1 +milvus_datacoord_task_execute_max_latency_sum{status="pending",task_type="JobTypeIndexJob"} 15 +milvus_datacoord_task_execute_max_latency_count{status="pending",task_type="JobTypeIndexJob"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="1"} 0 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="100"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="500"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="1000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="5000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="10000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="20000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="50000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="100000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="250000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="500000"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="1e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="3.6e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="5e+06"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="1e+07"} 1 +milvus_datacoord_task_execute_max_latency_bucket{status="pending",task_type="JobTypeStatsJob",le="+Inf"} 1 +milvus_datacoord_task_execute_max_latency_sum{status="pending",task_type="JobTypeStatsJob"} 5 +milvus_datacoord_task_execute_max_latency_count{status="pending",task_type="JobTypeStatsJob"} 1 +# HELP milvus_datanode_autoflush_buffer_op_count count of auto flush buffer operations +# TYPE milvus_datanode_autoflush_buffer_op_count counter +milvus_datanode_autoflush_buffer_op_count{node_id="37",segment_level="L1",status="success"} 1 +# HELP milvus_datanode_consume_bytes_count +# TYPE milvus_datanode_consume_bytes_count counter +milvus_datanode_consume_bytes_count{msg_type="delete",node_id="37"} 241 +milvus_datanode_consume_bytes_count{msg_type="insert",node_id="37"} 191574 +# HELP milvus_datanode_encode_buffer_latency latency of encode buffer data +# TYPE milvus_datanode_encode_buffer_latency histogram +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="1"} 0 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="2"} 0 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="4"} 0 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="8"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="16"} 1 
+milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="32"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="64"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="128"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="256"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="512"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="1024"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="2048"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="4096"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="8192"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="16384"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="32768"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="65536"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="131072"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L0",le="+Inf"} 1 +milvus_datanode_encode_buffer_latency_sum{node_id="37",segment_level="L0"} 6 +milvus_datanode_encode_buffer_latency_count{node_id="37",segment_level="L0"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="1"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="2"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="4"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="8"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="16"} 1 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="32"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="64"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="128"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="256"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="512"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="1024"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="2048"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="4096"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="8192"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="16384"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="32768"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="65536"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="131072"} 2 +milvus_datanode_encode_buffer_latency_bucket{node_id="37",segment_level="L1",le="+Inf"} 2 +milvus_datanode_encode_buffer_latency_sum{node_id="37",segment_level="L1"} 20 +milvus_datanode_encode_buffer_latency_count{node_id="37",segment_level="L1"} 2 +# HELP milvus_datanode_flowgraph_num number of flowgraphs +# TYPE milvus_datanode_flowgraph_num gauge +milvus_datanode_flowgraph_num{node_id="37"} 0 +# HELP milvus_datanode_flush_buffer_op_count count of flush buffer operations +# TYPE 
milvus_datanode_flush_buffer_op_count counter +milvus_datanode_flush_buffer_op_count{node_id="37",segment_level="L0",status="success"} 1 +milvus_datanode_flush_buffer_op_count{node_id="37",segment_level="L1",status="success"} 2 +# HELP milvus_datanode_flush_req_count count of flush request +# TYPE milvus_datanode_flush_req_count counter +milvus_datanode_flush_req_count{node_id="37",status="success"} 1 +milvus_datanode_flush_req_count{node_id="37",status="total"} 3 +# HELP milvus_datanode_flushed_data_rows num of rows flushed to storage +# TYPE milvus_datanode_flushed_data_rows counter +milvus_datanode_flushed_data_rows{data_source="streaming",node_id="37"} 3001 +# HELP milvus_datanode_flushed_data_size byte size of data flushed to storage +# TYPE milvus_datanode_flushed_data_size counter +milvus_datanode_flushed_data_size{data_source="streaming",node_id="37",segment_level="L0"} 620 +milvus_datanode_flushed_data_size{data_source="streaming",node_id="37",segment_level="L1"} 226983 +# HELP milvus_datanode_msg_rows_count count of rows consumed from msgStream +# TYPE milvus_datanode_msg_rows_count counter +milvus_datanode_msg_rows_count{msg_type="delete",node_id="37"} 2 +milvus_datanode_msg_rows_count{msg_type="insert",node_id="37"} 3001 +# HELP milvus_datanode_save_latency latency of saving flush data to storage +# TYPE milvus_datanode_save_latency histogram +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="0"} 0 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="10"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="100"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="200"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="400"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="1000"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="10000"} 1 +milvus_datanode_save_latency_bucket{msg_type="L0",node_id="37",le="+Inf"} 1 +milvus_datanode_save_latency_sum{msg_type="L0",node_id="37"} 5 +milvus_datanode_save_latency_count{msg_type="L0",node_id="37"} 1 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="0"} 0 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="10"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="100"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="200"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="400"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="1000"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="10000"} 2 +milvus_datanode_save_latency_bucket{msg_type="L1",node_id="37",le="+Inf"} 2 +milvus_datanode_save_latency_sum{msg_type="L1",node_id="37"} 14 +milvus_datanode_save_latency_count{msg_type="L1",node_id="37"} 2 +# HELP milvus_flushed_segment_file_num the num of files for flushed segment +# TYPE milvus_flushed_segment_file_num histogram +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="1"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="2"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="4"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="8"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="16"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="32"} 1 
+milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="64"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="128"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="256"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="512"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="1024"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="2048"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="4096"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="8192"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="16384"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="32768"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="65536"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="131072"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="delete_file",le="+Inf"} 1 +milvus_flushed_segment_file_num_sum{segment_file_type="delete_file"} 0 +milvus_flushed_segment_file_num_count{segment_file_type="delete_file"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="1"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="2"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="4"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="8"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="16"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="32"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="64"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="128"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="256"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="512"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="1024"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="2048"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="4096"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="8192"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="16384"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="32768"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="65536"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="131072"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="index_file",le="+Inf"} 11 +milvus_flushed_segment_file_num_sum{segment_file_type="index_file"} 11 +milvus_flushed_segment_file_num_count{segment_file_type="index_file"} 11 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="1"} 0 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="2"} 0 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="4"} 0 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="8"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="16"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="32"} 1 
+milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="64"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="128"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="256"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="512"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="1024"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="2048"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="4096"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="8192"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="16384"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="32768"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="65536"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="131072"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="insert_file",le="+Inf"} 1 +milvus_flushed_segment_file_num_sum{segment_file_type="insert_file"} 5 +milvus_flushed_segment_file_num_count{segment_file_type="insert_file"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="1"} 0 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="2"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="4"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="8"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="16"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="32"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="64"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="128"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="256"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="512"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="1024"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="2048"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="4096"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="8192"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="16384"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="32768"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="65536"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="131072"} 1 +milvus_flushed_segment_file_num_bucket{segment_file_type="stat_file",le="+Inf"} 1 +milvus_flushed_segment_file_num_sum{segment_file_type="stat_file"} 2 +milvus_flushed_segment_file_num_count{segment_file_type="stat_file"} 1 +# HELP milvus_indexnode_build_index_latency latency of build index for segment +# TYPE milvus_indexnode_build_index_latency histogram +milvus_indexnode_build_index_latency_bucket{node_id="37",le="0.001"} 0 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="0.1"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="0.5"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="1"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="5"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="10"} 1 
+milvus_indexnode_build_index_latency_bucket{node_id="37",le="20"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="50"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="100"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="250"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="500"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="1000"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="3600"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="5000"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="10000"} 1 +milvus_indexnode_build_index_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_indexnode_build_index_latency_sum{node_id="37"} 0.033203625 +milvus_indexnode_build_index_latency_count{node_id="37"} 1 +# HELP milvus_indexnode_encode_index_latency latency of encoding the index file +# TYPE milvus_indexnode_encode_index_latency histogram +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="0.001"} 0 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="0.1"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="0.5"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="1"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="5"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="10"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="20"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="50"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="100"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="250"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="500"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="1000"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="3600"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="5000"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="10000"} 1 +milvus_indexnode_encode_index_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_indexnode_encode_index_latency_sum{node_id="37"} 0.007635709 +milvus_indexnode_encode_index_latency_count{node_id="37"} 1 +# HELP milvus_indexnode_index_task_count number of tasks that index node received +# TYPE milvus_indexnode_index_task_count counter +milvus_indexnode_index_task_count{node_id="37",status="success"} 1 +# HELP milvus_indexnode_index_task_latency_in_queue latency of index task in queue +# TYPE milvus_indexnode_index_task_latency_in_queue histogram +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="1"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="2"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="4"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="8"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="16"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="32"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="64"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="128"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="256"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="512"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="1024"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="2048"} 1 
+milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="4096"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="8192"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="16384"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="32768"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="65536"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="131072"} 1 +milvus_indexnode_index_task_latency_in_queue_bucket{node_id="37",le="+Inf"} 1 +milvus_indexnode_index_task_latency_in_queue_sum{node_id="37"} 0 +milvus_indexnode_index_task_latency_in_queue_count{node_id="37"} 1 +# HELP milvus_indexnode_knowhere_build_index_latency latency of building the index by knowhere +# TYPE milvus_indexnode_knowhere_build_index_latency histogram +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="0.001"} 0 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="0.1"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="0.5"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="1"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="5"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="10"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="20"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="50"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="100"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="250"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="500"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="1000"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="3600"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="5000"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="10000"} 1 +milvus_indexnode_knowhere_build_index_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_indexnode_knowhere_build_index_latency_sum{node_id="37"} 0.025299416 +milvus_indexnode_knowhere_build_index_latency_count{node_id="37"} 1 +# HELP milvus_indexnode_save_index_latency latency of saving the index file +# TYPE milvus_indexnode_save_index_latency histogram +milvus_indexnode_save_index_latency_bucket{node_id="37",le="0.001"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="0.1"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="0.5"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="1"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="5"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="10"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="20"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="50"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="100"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="250"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="500"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="1000"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="3600"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="5000"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="10000"} 1 +milvus_indexnode_save_index_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_indexnode_save_index_latency_sum{node_id="37"} 6.0041e-05
+milvus_indexnode_save_index_latency_count{node_id="37"} 1 +# HELP milvus_meta_kv_size kv size stats +# TYPE milvus_meta_kv_size histogram +milvus_meta_kv_size_bucket{meta_op_type="get",le="1"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="2"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="4"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="8"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="16"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="32"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="64"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="128"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="256"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="512"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="1024"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="2048"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="4096"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="8192"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="16384"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="32768"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="65536"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="131072"} 89 +milvus_meta_kv_size_bucket{meta_op_type="get",le="+Inf"} 89 +milvus_meta_kv_size_sum{meta_op_type="get"} -313 +milvus_meta_kv_size_count{meta_op_type="get"} 89 +milvus_meta_kv_size_bucket{meta_op_type="put",le="1"} 1 +milvus_meta_kv_size_bucket{meta_op_type="put",le="2"} 1 +milvus_meta_kv_size_bucket{meta_op_type="put",le="4"} 1 +milvus_meta_kv_size_bucket{meta_op_type="put",le="8"} 39 +milvus_meta_kv_size_bucket{meta_op_type="put",le="16"} 40 +milvus_meta_kv_size_bucket{meta_op_type="put",le="32"} 40 +milvus_meta_kv_size_bucket{meta_op_type="put",le="64"} 50 +milvus_meta_kv_size_bucket{meta_op_type="put",le="128"} 60 +milvus_meta_kv_size_bucket{meta_op_type="put",le="256"} 63 +milvus_meta_kv_size_bucket{meta_op_type="put",le="512"} 72 +milvus_meta_kv_size_bucket{meta_op_type="put",le="1024"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="2048"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="4096"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="8192"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="16384"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="32768"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="65536"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="131072"} 76 +milvus_meta_kv_size_bucket{meta_op_type="put",le="+Inf"} 76 +milvus_meta_kv_size_sum{meta_op_type="put"} 8526 +milvus_meta_kv_size_count{meta_op_type="put"} 76 +# HELP milvus_meta_op_count count of meta operation +# TYPE milvus_meta_op_count counter +milvus_meta_op_count{meta_op_type="get",status="success"} 63 +milvus_meta_op_count{meta_op_type="get",status="total"} 63 +milvus_meta_op_count{meta_op_type="put",status="success"} 50 +milvus_meta_op_count{meta_op_type="put",status="total"} 50 +milvus_meta_op_count{meta_op_type="remove",status="success"} 6 +milvus_meta_op_count{meta_op_type="remove",status="total"} 6 +milvus_meta_op_count{meta_op_type="txn",status="success"} 26 +milvus_meta_op_count{meta_op_type="txn",status="total"} 26 +# HELP milvus_meta_request_latency request latency on the client side +# TYPE milvus_meta_request_latency histogram +milvus_meta_request_latency_bucket{meta_op_type="get",le="1"} 62 +milvus_meta_request_latency_bucket{meta_op_type="get",le="2"} 62 +milvus_meta_request_latency_bucket{meta_op_type="get",le="4"} 62
+milvus_meta_request_latency_bucket{meta_op_type="get",le="8"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="16"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="32"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="64"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="128"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="256"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="512"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="1024"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="2048"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="4096"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="8192"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="16384"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="32768"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="65536"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="131072"} 63 +milvus_meta_request_latency_bucket{meta_op_type="get",le="+Inf"} 63 +milvus_meta_request_latency_sum{meta_op_type="get"} 7 +milvus_meta_request_latency_count{meta_op_type="get"} 63 +milvus_meta_request_latency_bucket{meta_op_type="put",le="1"} 19 +milvus_meta_request_latency_bucket{meta_op_type="put",le="2"} 35 +milvus_meta_request_latency_bucket{meta_op_type="put",le="4"} 45 +milvus_meta_request_latency_bucket{meta_op_type="put",le="8"} 49 +milvus_meta_request_latency_bucket{meta_op_type="put",le="16"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="32"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="64"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="128"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="256"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="512"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="1024"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="2048"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="4096"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="8192"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="16384"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="32768"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="65536"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="131072"} 50 +milvus_meta_request_latency_bucket{meta_op_type="put",le="+Inf"} 50 +milvus_meta_request_latency_sum{meta_op_type="put"} 106 +milvus_meta_request_latency_count{meta_op_type="put"} 50 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="1"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="2"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="4"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="8"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="16"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="32"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="64"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="128"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="256"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="512"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="1024"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="2048"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="4096"} 6 
+milvus_meta_request_latency_bucket{meta_op_type="remove",le="8192"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="16384"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="32768"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="65536"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="131072"} 6 +milvus_meta_request_latency_bucket{meta_op_type="remove",le="+Inf"} 6 +milvus_meta_request_latency_sum{meta_op_type="remove"} 0 +milvus_meta_request_latency_count{meta_op_type="remove"} 6 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="1"} 22 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="2"} 25 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="4"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="8"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="16"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="32"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="64"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="128"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="256"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="512"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="1024"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="2048"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="4096"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="8192"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="16384"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="32768"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="65536"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="131072"} 26 +milvus_meta_request_latency_bucket{meta_op_type="txn",le="+Inf"} 26 +milvus_meta_request_latency_sum{meta_op_type="txn"} 14 +milvus_meta_request_latency_count{meta_op_type="txn"} 26 +# HELP milvus_msg_queue_consumer_num number of consumers +# TYPE milvus_msg_queue_consumer_num gauge +milvus_msg_queue_consumer_num{node_id="37",role_name="standalone"} 0 +# HELP milvus_msgstream_op_count count of stream message operation +# TYPE milvus_msgstream_op_count counter +milvus_msgstream_op_count{message_op_type="create_consumer",status="success"} 2 +milvus_msgstream_op_count{message_op_type="create_consumer",status="total"} 2 +milvus_msgstream_op_count{message_op_type="create_producer",status="success"} 18 +milvus_msgstream_op_count{message_op_type="create_producer",status="total"} 18 +milvus_msgstream_op_count{message_op_type="produce",status="success"} 3594 +milvus_msgstream_op_count{message_op_type="produce",status="total"} 3594 +# HELP milvus_msgstream_request_latency request latency on the client side +# TYPE milvus_msgstream_request_latency histogram +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="1"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="2"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="4"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="8"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="16"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="32"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="64"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="128"} 2 
+milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="256"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="512"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="1024"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="2048"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="4096"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="8192"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="16384"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="32768"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="65536"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="131072"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_consumer",le="+Inf"} 2 +milvus_msgstream_request_latency_sum{message_op_type="create_consumer"} 0 +milvus_msgstream_request_latency_count{message_op_type="create_consumer"} 2 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="1"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="2"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="4"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="8"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="16"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="32"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="64"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="128"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="256"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="512"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="1024"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="2048"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="4096"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="8192"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="16384"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="32768"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="65536"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="131072"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="create_producer",le="+Inf"} 18 +milvus_msgstream_request_latency_sum{message_op_type="create_producer"} 0 +milvus_msgstream_request_latency_count{message_op_type="create_producer"} 18 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="1"} 781 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="2"} 1620 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="4"} 2887 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="8"} 3438 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="16"} 3583 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="32"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="64"} 3594 
+milvus_msgstream_request_latency_bucket{message_op_type="produce",le="128"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="256"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="512"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="1024"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="2048"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="4096"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="8192"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="16384"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="32768"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="65536"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="131072"} 3594 +milvus_msgstream_request_latency_bucket{message_op_type="produce",le="+Inf"} 3594 +milvus_msgstream_request_latency_sum{message_op_type="produce"} 11317 +milvus_msgstream_request_latency_count{message_op_type="produce"} 3594 +# HELP milvus_num_node number of nodes and coordinates +# TYPE milvus_num_node gauge +milvus_num_node{node_id="37",role_name="datacoord"} 1 +milvus_num_node{node_id="37",role_name="datanode"} 1 +milvus_num_node{node_id="37",role_name="indexnode"} 1 +milvus_num_node{node_id="37",role_name="proxy"} 1 +milvus_num_node{node_id="37",role_name="querycoord"} 1 +milvus_num_node{node_id="37",role_name="querynode"} 1 +milvus_num_node{node_id="37",role_name="rootcoord"} 1 +# HELP milvus_proxy_apply_pk_latency latency that apply primary key +# TYPE milvus_proxy_apply_pk_latency histogram +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="1"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="2"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="4"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="8"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="16"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="32"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="64"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="128"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="256"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="512"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="1024"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="2048"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="4096"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="8192"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="16384"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="32768"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="65536"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="131072"} 2 +milvus_proxy_apply_pk_latency_bucket{node_id="37",le="+Inf"} 2 +milvus_proxy_apply_pk_latency_sum{node_id="37"} 0 +milvus_proxy_apply_pk_latency_count{node_id="37"} 2 +# HELP milvus_proxy_apply_timestamp_latency latency that proxy apply timestamp +# TYPE milvus_proxy_apply_timestamp_latency histogram +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="1"} 357 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="2"} 523 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="4"} 561 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="8"} 577 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="16"} 578 
+milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="32"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="64"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="128"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="256"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="512"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="1024"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="2048"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="4096"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="8192"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="16384"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="32768"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="65536"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="131072"} 578 +milvus_proxy_apply_timestamp_latency_bucket{node_id="37",le="+Inf"} 578 +milvus_proxy_apply_timestamp_latency_sum{node_id="37"} 750 +milvus_proxy_apply_timestamp_latency_count{node_id="37"} 578 +# HELP milvus_proxy_assign_segmentID_latency latency that proxy get segmentID from dataCoord +# TYPE milvus_proxy_assign_segmentID_latency histogram +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="1"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="2"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="4"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="8"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="16"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="32"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="64"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="128"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="256"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="512"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="1024"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="2048"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="4096"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="8192"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="16384"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="32768"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="65536"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="131072"} 2 +milvus_proxy_assign_segmentID_latency_bucket{node_id="37",le="+Inf"} 2 +milvus_proxy_assign_segmentID_latency_sum{node_id="37"} 1 +milvus_proxy_assign_segmentID_latency_count{node_id="37"} 2 +# HELP milvus_proxy_cache_hit_count count of cache hits/miss +# TYPE milvus_proxy_cache_hit_count counter +milvus_proxy_cache_hit_count{cache_name="GetCollectionID",cache_state="hit",node_id="37"} 51 +milvus_proxy_cache_hit_count{cache_name="GetCollectionID",cache_state="miss",node_id="37"} 3 +milvus_proxy_cache_hit_count{cache_name="GetCollectionInfo",cache_state="hit",node_id="37"} 19 +milvus_proxy_cache_hit_count{cache_name="GetCollectionSchema",cache_state="hit",node_id="37"} 27 +milvus_proxy_cache_hit_count{cache_name="GetShards",cache_state="hit",node_id="37"} 6 +milvus_proxy_cache_hit_count{cache_name="GetShards",cache_state="miss",node_id="37"} 2 +# HELP milvus_proxy_cache_update_latency latency that proxy update cache when cache miss +# TYPE milvus_proxy_cache_update_latency histogram
+milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="1"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="2"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="4"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="8"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="16"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="32"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="64"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="128"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="256"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="512"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="1024"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="2048"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="4096"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="8192"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="16384"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="32768"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="65536"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="131072"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetCollectionID",node_id="37",le="+Inf"} 2 +milvus_proxy_cache_update_latency_sum{cache_name="GetCollectionID",node_id="37"} 1 +milvus_proxy_cache_update_latency_count{cache_name="GetCollectionID",node_id="37"} 2 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="1"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="2"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="4"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="8"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="16"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="32"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="64"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="128"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="256"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="512"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="1024"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="2048"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="4096"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="8192"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="16384"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="32768"} 1
+milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="65536"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="131072"} 1 +milvus_proxy_cache_update_latency_bucket{cache_name="GetShards",node_id="37",le="+Inf"} 1 +milvus_proxy_cache_update_latency_sum{cache_name="GetShards",node_id="37"} 1 +milvus_proxy_cache_update_latency_count{cache_name="GetShards",node_id="37"} 1 +# HELP milvus_proxy_delete_vectors_count counter of vectors successfully deleted +# TYPE milvus_proxy_delete_vectors_count counter +milvus_proxy_delete_vectors_count{collection_name="hello_milvus",db_name="default",node_id="37"} 2 +# HELP milvus_proxy_msgstream_obj_num number of MsgStream objects per physical channel +# TYPE milvus_proxy_msgstream_obj_num gauge +milvus_proxy_msgstream_obj_num{channel_name="by-dev-rootcoord-dml_10",node_id="37"} 0 +# HELP milvus_proxy_mutation_send_latency latency that proxy send insert request to MsgStream +# TYPE milvus_proxy_mutation_send_latency histogram +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="1"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="100"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="500"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="1000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="5000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="10000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="20000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="50000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="100000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="250000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="500000"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="1e+06"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="3.6e+06"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="5e+06"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="1e+07"} 2 +milvus_proxy_mutation_send_latency_bucket{msg_type="insert",node_id="37",le="+Inf"} 2 +milvus_proxy_mutation_send_latency_sum{msg_type="insert",node_id="37"} 1 +milvus_proxy_mutation_send_latency_count{msg_type="insert",node_id="37"} 2 +# HELP milvus_proxy_rate_limit_req_count count of operation executed +# TYPE milvus_proxy_rate_limit_req_count counter +milvus_proxy_rate_limit_req_count{msg_type="DDLCollection",node_id="37",status="success"} 23 +milvus_proxy_rate_limit_req_count{msg_type="DDLCollection",node_id="37",status="total"} 23 +milvus_proxy_rate_limit_req_count{msg_type="DDLFlush",node_id="37",status="success"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DDLFlush",node_id="37",status="total"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DDLIndex",node_id="37",status="success"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DDLIndex",node_id="37",status="total"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DMLDelete",node_id="37",status="success"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DMLDelete",node_id="37",status="total"} 1 +milvus_proxy_rate_limit_req_count{msg_type="DMLInsert",node_id="37",status="success"} 2 
+milvus_proxy_rate_limit_req_count{msg_type="DMLInsert",node_id="37",status="total"} 2 +milvus_proxy_rate_limit_req_count{msg_type="DQLQuery",node_id="37",status="success"} 5 +milvus_proxy_rate_limit_req_count{msg_type="DQLQuery",node_id="37",status="total"} 5 +milvus_proxy_rate_limit_req_count{msg_type="DQLSearch",node_id="37",status="success"} 2 +milvus_proxy_rate_limit_req_count{msg_type="DQLSearch",node_id="37",status="total"} 2 +# HELP milvus_proxy_report_value report value about the request +# TYPE milvus_proxy_report_value counter +milvus_proxy_report_value{db_name="default",msg_type="delete",node_id="37",username=""} 0 +milvus_proxy_report_value{db_name="default",msg_type="insert",node_id="37",username=""} 0 +milvus_proxy_report_value{db_name="default",msg_type="query",node_id="37",username=""} 0 +milvus_proxy_report_value{db_name="default",msg_type="search",node_id="37",username=""} 0 +# HELP milvus_proxy_req_count count of operation executed +# TYPE milvus_proxy_req_count counter +milvus_proxy_req_count{collection_name="",db_name="default",function_name="Flush",node_id="37",status="success"} 1 +milvus_proxy_req_count{collection_name="",db_name="default",function_name="Flush",node_id="37",status="total"} 1 +milvus_proxy_req_count{collection_name="hello_milvus",db_name="default",function_name="DropCollection",node_id="37",status="success"} 1 +# HELP milvus_proxy_req_in_queue_latency latency which request waits in the queue +# TYPE milvus_proxy_req_in_queue_latency histogram +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="2"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="16"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="CreateCollection",node_id="37"} 0 
+milvus_proxy_req_in_queue_latency_count{function_name="CreateCollection",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="2"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="16"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="32768"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="CreateIndex",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="CreateIndex",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="CreateIndex",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="2"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="16"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="32768"} 1 
+milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Delete",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="Delete",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="Delete",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="1"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="2"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="4"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="8"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="16"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="32"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="64"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="128"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="256"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="512"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="1024"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="2048"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="4096"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="8192"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="16384"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="32768"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="65536"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="131072"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeCollection",node_id="37",le="+Inf"} 3 +milvus_proxy_req_in_queue_latency_sum{function_name="DescribeCollection",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="DescribeCollection",node_id="37"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="1"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="2"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="4"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="8"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="16"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="32"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="64"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="128"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="256"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="512"} 3 
+milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="1024"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="2048"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="4096"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="8192"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="16384"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="32768"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="65536"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="131072"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DescribeIndex",node_id="37",le="+Inf"} 3 +milvus_proxy_req_in_queue_latency_sum{function_name="DescribeIndex",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="DescribeIndex",node_id="37"} 3 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="2"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="16"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="DropCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="DropCollection",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="DropCollection",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="2"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="16"} 1 
+milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="32768"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Flush",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="Flush",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="Flush",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="1"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="2"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="4"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="8"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="16"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="32"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="64"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="128"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="256"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="512"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="1024"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="2048"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="4096"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="8192"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="16384"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="32768"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="65536"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="131072"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Insert",node_id="37",le="+Inf"} 2 +milvus_proxy_req_in_queue_latency_sum{function_name="Insert",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="Insert",node_id="37"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="1"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="2"} 1 
+milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="4"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="8"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="16"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="32"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="64"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="128"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="256"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="512"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="8192"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="LoadCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_in_queue_latency_sum{function_name="LoadCollection",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="LoadCollection",node_id="37"} 1 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="1"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="2"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="4"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="8"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="16"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="32"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="64"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="128"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="256"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="512"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="1024"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="2048"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="4096"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="8192"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="16384"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="32768"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="65536"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="131072"} 5 
+milvus_proxy_req_in_queue_latency_bucket{function_name="Retrieve",node_id="37",le="+Inf"} 5 +milvus_proxy_req_in_queue_latency_sum{function_name="Retrieve",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="Retrieve",node_id="37"} 5 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="1"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="2"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="4"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="8"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="16"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="32"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="64"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="128"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="256"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="512"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="1024"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="2048"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="4096"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="8192"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="16384"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="32768"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="65536"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="131072"} 2 +milvus_proxy_req_in_queue_latency_bucket{function_name="Search",node_id="37",le="+Inf"} 2 +milvus_proxy_req_in_queue_latency_sum{function_name="Search",node_id="37"} 0 +milvus_proxy_req_in_queue_latency_count{function_name="Search",node_id="37"} 2 +# HELP milvus_proxy_req_latency latency of each request +# TYPE milvus_proxy_req_latency histogram +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="2"} 0 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="4"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="8"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="16"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="32"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="64"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="128"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="256"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="512"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="8192"} 1 
+milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_latency_sum{function_name="CreateCollection",node_id="37"} 4 +milvus_proxy_req_latency_count{function_name="CreateCollection",node_id="37"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="2"} 0 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="4"} 0 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="8"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="16"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="32"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="64"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="128"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="256"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="512"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="1024"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="2048"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="4096"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="8192"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="16384"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="32768"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="65536"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="131072"} 1 +milvus_proxy_req_latency_bucket{function_name="CreateIndex",node_id="37",le="+Inf"} 1 +milvus_proxy_req_latency_sum{function_name="CreateIndex",node_id="37"} 7 +milvus_proxy_req_latency_count{function_name="CreateIndex",node_id="37"} 1 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="1"} 2 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="2"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="4"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="8"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="16"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="32"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="64"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="128"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="256"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="512"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="1024"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="2048"} 3 
+milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="4096"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="8192"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="16384"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="32768"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="65536"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="131072"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeCollection",node_id="37",le="+Inf"} 3 +milvus_proxy_req_latency_sum{function_name="DescribeCollection",node_id="37"} 2 +milvus_proxy_req_latency_count{function_name="DescribeCollection",node_id="37"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="2"} 2 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="4"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="8"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="16"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="32"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="64"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="128"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="256"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="512"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="1024"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="2048"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="4096"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="8192"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="16384"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="32768"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="65536"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="131072"} 3 +milvus_proxy_req_latency_bucket{function_name="DescribeIndex",node_id="37",le="+Inf"} 3 +milvus_proxy_req_latency_sum{function_name="DescribeIndex",node_id="37"} 8 +milvus_proxy_req_latency_count{function_name="DescribeIndex",node_id="37"} 3 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="2"} 0 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="4"} 0 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="8"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="16"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="32"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="64"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="128"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="256"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="512"} 1 
+milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="8192"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_latency_bucket{function_name="DropCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_latency_sum{function_name="DropCollection",node_id="37"} 6 +milvus_proxy_req_latency_count{function_name="DropCollection",node_id="37"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="2"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="4"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="8"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="16"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="32"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="64"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="128"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="256"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="512"} 0 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="1024"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="2048"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="4096"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="8192"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="16384"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="32768"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="65536"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="131072"} 1 +milvus_proxy_req_latency_bucket{function_name="Flush",node_id="37",le="+Inf"} 1 +milvus_proxy_req_latency_sum{function_name="Flush",node_id="37"} 619 +milvus_proxy_req_latency_count{function_name="Flush",node_id="37"} 1 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="1"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="2"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="4"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="8"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="16"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="32"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="64"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="128"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="256"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="512"} 6 
+milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="1024"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="2048"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="4096"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="8192"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="16384"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="32768"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="65536"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="131072"} 6 +milvus_proxy_req_latency_bucket{function_name="GetLoadingProgress",node_id="37",le="+Inf"} 6 +milvus_proxy_req_latency_sum{function_name="GetLoadingProgress",node_id="37"} 2 +milvus_proxy_req_latency_count{function_name="GetLoadingProgress",node_id="37"} 6 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="1"} 0 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="2"} 0 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="4"} 0 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="8"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="16"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="32"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="64"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="128"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="256"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="512"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="1024"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="2048"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="4096"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="8192"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="16384"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="32768"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="65536"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="131072"} 1 +milvus_proxy_req_latency_bucket{function_name="LoadCollection",node_id="37",le="+Inf"} 1 +milvus_proxy_req_latency_sum{function_name="LoadCollection",node_id="37"} 7 +milvus_proxy_req_latency_count{function_name="LoadCollection",node_id="37"} 1 +# HELP milvus_proxy_search_sparse_num_non_zeros the number of non-zeros in each sparse search task +# TYPE milvus_proxy_search_sparse_num_non_zeros histogram +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="1"} 0 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="2"} 0 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="4"} 0 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="8"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="16"} 2 
+milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="32"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="64"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="128"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="256"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="512"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="1024"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="2048"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="4096"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="8192"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="16384"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="32768"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="65536"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="131072"} 2 +milvus_proxy_search_sparse_num_non_zeros_bucket{collection_name="hello_milvus",node_id="37",le="+Inf"} 2 +milvus_proxy_search_sparse_num_non_zeros_sum{collection_name="hello_milvus",node_id="37"} 16 +milvus_proxy_search_sparse_num_non_zeros_count{collection_name="hello_milvus",node_id="37"} 2 +# HELP milvus_proxy_send_bytes_count count of bytes sent back to sdk +# TYPE milvus_proxy_send_bytes_count counter +milvus_proxy_send_bytes_count{node_id="37"} 70813 +# HELP milvus_proxy_sq_decode_result_latency latency that proxy decodes the search result +# TYPE milvus_proxy_sq_decode_result_latency histogram +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="1"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="2"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="4"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="8"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="16"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="32"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="64"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="128"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="256"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="512"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="1024"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="2048"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="4096"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="8192"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="16384"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="32768"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="65536"} 5 
+milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="131072"} 5 +milvus_proxy_sq_decode_result_latency_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_proxy_sq_decode_result_latency_sum{node_id="37",query_type="query"} 0 +milvus_proxy_sq_decode_result_latency_count{node_id="37",query_type="query"} 5 +# HELP milvus_proxy_sq_reduce_result_latency latency that proxy reduces search result +# TYPE milvus_proxy_sq_reduce_result_latency histogram +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="1"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="2"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="4"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="8"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="16"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="32"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="64"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="128"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="256"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="512"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="1024"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="2048"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="4096"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="8192"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="16384"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="32768"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="65536"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="131072"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_proxy_sq_reduce_result_latency_sum{node_id="37",query_type="query"} 0 +milvus_proxy_sq_reduce_result_latency_count{node_id="37",query_type="query"} 5 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="1"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="2"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="4"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="8"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="16"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="32"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="64"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="128"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="256"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="512"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="1024"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="2048"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="4096"} 2 
+milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="8192"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="16384"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="32768"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="65536"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="131072"} 2 +milvus_proxy_sq_reduce_result_latency_bucket{node_id="37",query_type="search",le="+Inf"} 2 +milvus_proxy_sq_reduce_result_latency_sum{node_id="37",query_type="search"} 0 +milvus_proxy_sq_reduce_result_latency_count{node_id="37",query_type="search"} 2 +# HELP milvus_proxy_sq_wait_result_latency latency that proxy waits for the result +# TYPE milvus_proxy_sq_wait_result_latency histogram +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="1"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="2"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="4"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="8"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="16"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="32"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="64"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="128"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="256"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="512"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="1024"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="2048"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="4096"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="8192"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="16384"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="32768"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="65536"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="131072"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_proxy_sq_wait_result_latency_sum{node_id="37",query_type="query"} 1478 +milvus_proxy_sq_wait_result_latency_count{node_id="37",query_type="query"} 5 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="1"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="2"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="4"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="8"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="16"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="32"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="64"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="128"} 0 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="256"} 1 
+milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="512"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="1024"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="2048"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="4096"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="8192"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="16384"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="32768"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="65536"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="131072"} 2 +milvus_proxy_sq_wait_result_latency_bucket{node_id="37",query_type="search",le="+Inf"} 2 +milvus_proxy_sq_wait_result_latency_sum{node_id="37",query_type="search"} 610 +milvus_proxy_sq_wait_result_latency_count{node_id="37",query_type="search"} 2 +# HELP milvus_proxy_sync_segment_request_length the length of SegmentIDRequests when assigning segments for insert +# TYPE milvus_proxy_sync_segment_request_length histogram +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="1"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="2"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="4"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="8"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="16"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="32"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="64"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="128"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="256"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="512"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="1024"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="2048"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="4096"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="8192"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="16384"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="32768"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="65536"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="131072"} 2 +milvus_proxy_sync_segment_request_length_bucket{node_id="37",le="+Inf"} 2 +milvus_proxy_sync_segment_request_length_sum{node_id="37"} 2 +milvus_proxy_sync_segment_request_length_count{node_id="37"} 2 +# HELP milvus_proxy_tt_lag_ms now time minus tt per physical channel +# TYPE milvus_proxy_tt_lag_ms gauge +milvus_proxy_tt_lag_ms{channel_name="default",node_id="37"} 36 +# HELP milvus_querycoord_collection_num number of collections +# TYPE milvus_querycoord_collection_num gauge +milvus_querycoord_collection_num 0 +# HELP milvus_querycoord_load_latency latency of load the entire collection +# TYPE milvus_querycoord_load_latency histogram +milvus_querycoord_load_latency_bucket{le="0"} 0 +milvus_querycoord_load_latency_bucket{le="500"} 0 +milvus_querycoord_load_latency_bucket{le="1000"} 2 +milvus_querycoord_load_latency_bucket{le="2000"} 2 +milvus_querycoord_load_latency_bucket{le="5000"} 2 +milvus_querycoord_load_latency_bucket{le="10000"} 2 
+milvus_querycoord_load_latency_bucket{le="20000"} 2 +milvus_querycoord_load_latency_bucket{le="50000"} 2 +milvus_querycoord_load_latency_bucket{le="60000"} 2 +milvus_querycoord_load_latency_bucket{le="300000"} 2 +milvus_querycoord_load_latency_bucket{le="600000"} 2 +milvus_querycoord_load_latency_bucket{le="1.8e+06"} 2 +milvus_querycoord_load_latency_bucket{le="+Inf"} 2 +milvus_querycoord_load_latency_sum 1996 +milvus_querycoord_load_latency_count 2 +# HELP milvus_querycoord_load_req_count count of load request +# TYPE milvus_querycoord_load_req_count counter +milvus_querycoord_load_req_count{status="success"} 1 +milvus_querycoord_load_req_count{status="total"} 1 +# HELP milvus_querycoord_partition_num number of partitions +# TYPE milvus_querycoord_partition_num gauge +milvus_querycoord_partition_num 0 +# HELP milvus_querycoord_querynode_num number of QueryNodes managered by QueryCoord +# TYPE milvus_querycoord_querynode_num gauge +milvus_querycoord_querynode_num 1 +# HELP milvus_querycoord_release_latency latency of release request +# TYPE milvus_querycoord_release_latency histogram +milvus_querycoord_release_latency_bucket{le="0"} 10 +milvus_querycoord_release_latency_bucket{le="5"} 10 +milvus_querycoord_release_latency_bucket{le="10"} 10 +milvus_querycoord_release_latency_bucket{le="20"} 10 +milvus_querycoord_release_latency_bucket{le="40"} 10 +milvus_querycoord_release_latency_bucket{le="100"} 10 +milvus_querycoord_release_latency_bucket{le="200"} 10 +milvus_querycoord_release_latency_bucket{le="400"} 10 +milvus_querycoord_release_latency_bucket{le="1000"} 11 +milvus_querycoord_release_latency_bucket{le="10000"} 11 +milvus_querycoord_release_latency_bucket{le="+Inf"} 11 +milvus_querycoord_release_latency_sum 606 +milvus_querycoord_release_latency_count 11 +# HELP milvus_querycoord_release_req_count count of release request +# TYPE milvus_querycoord_release_req_count counter +milvus_querycoord_release_req_count{status="success"} 1 +milvus_querycoord_release_req_count{status="total"} 1 +# HELP milvus_querycoord_replica_ro_node_total total read only node number of replica +# TYPE milvus_querycoord_replica_ro_node_total gauge +milvus_querycoord_replica_ro_node_total 0 +# HELP milvus_querycoord_resource_group_info all resource group detail info in query coord +# TYPE milvus_querycoord_resource_group_info gauge +milvus_querycoord_resource_group_info{node_id="37",rg="__default_resource_group"} 1 +# HELP milvus_querycoord_resource_group_replica_total total replica number of resource group +# TYPE milvus_querycoord_resource_group_replica_total gauge +milvus_querycoord_resource_group_replica_total{rg="__default_resource_group"} 0 +# HELP milvus_querycoord_task_num the number of tasks in QueryCoord's scheduler +# TYPE milvus_querycoord_task_num gauge +milvus_querycoord_task_num{querycoord_task_type="channel_grow"} 0 +milvus_querycoord_task_num{querycoord_task_type="channel_move"} 0 +milvus_querycoord_task_num{querycoord_task_type="channel_reduce"} 0 +milvus_querycoord_task_num{querycoord_task_type="segment_grow"} 0 +milvus_querycoord_task_num{querycoord_task_type="segment_move"} 0 +milvus_querycoord_task_num{querycoord_task_type="segment_reduce"} 0 +# HELP milvus_querynode_apply_bf_latency apply bf cost in ms +# TYPE milvus_querynode_apply_bf_latency histogram +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="1"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="2"} 1 
+milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="4"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="8"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="16"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="32"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="64"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="128"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="256"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="512"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="1024"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="2048"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="4096"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="8192"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="16384"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="32768"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="65536"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="131072"} 1 +milvus_querynode_apply_bf_latency_bucket{function_name="ProcessDelete",node_id="37",le="+Inf"} 1 +milvus_querynode_apply_bf_latency_sum{function_name="ProcessDelete",node_id="37"} 0 +milvus_querynode_apply_bf_latency_count{function_name="ProcessDelete",node_id="37"} 1 +# HELP milvus_querynode_cgo_latency latency of each cgo call +# TYPE milvus_querynode_cgo_latency histogram +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="1"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="2"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="4"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="8"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="16"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="32"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="64"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="128"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="256"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="512"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="1024"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="2048"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="4096"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="8192"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="16384"} 1 
+milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="32768"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="65536"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="131072"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37",le="+Inf"} 1 +milvus_querynode_cgo_latency_sum{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37"} 1 +milvus_querynode_cgo_latency_count{cgo_name="AppendIndexV2",cgo_type="Sync",node_id="37"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="1"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="2"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="4"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="8"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="16"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="32"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="64"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="128"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="256"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="512"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="1024"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="2048"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="4096"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="8192"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="16384"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="32768"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="65536"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="131072"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="Delete",cgo_type="Sync",node_id="37",le="+Inf"} 1 +milvus_querynode_cgo_latency_sum{cgo_name="Delete",cgo_type="Sync",node_id="37"} 0 +milvus_querynode_cgo_latency_count{cgo_name="Delete",cgo_type="Sync",node_id="37"} 1 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="1"} 3 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="2"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="4"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="8"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="16"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="32"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="64"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="128"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="256"} 4 
+milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="512"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="1024"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="2048"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="4096"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="8192"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="16384"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="32768"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="65536"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="131072"} 4 +milvus_querynode_cgo_latency_bucket{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37",le="+Inf"} 4 +milvus_querynode_cgo_latency_sum{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37"} 5 +milvus_querynode_cgo_latency_count{cgo_name="LoadFieldData",cgo_type="Sync",node_id="37"} 4 +# HELP milvus_querynode_collection_num number of collections loaded +# TYPE milvus_querynode_collection_num gauge +milvus_querynode_collection_num{node_id="37"} 0 +# HELP milvus_querynode_consume_bytes_counter +# TYPE milvus_querynode_consume_bytes_counter counter +milvus_querynode_consume_bytes_counter{msg_type="insert",node_id="37"} 241 +# HELP milvus_querynode_disk_cache_evict_bytes number of bytes evicted from disk cache +# TYPE milvus_querynode_disk_cache_evict_bytes counter +milvus_querynode_disk_cache_evict_bytes{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_cache_evict_duration total time cost of evicting segments from disk cache +# TYPE milvus_querynode_disk_cache_evict_duration counter +milvus_querynode_disk_cache_evict_duration{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_cache_evict_global_duration global duration of evicting segments from disk cache +# TYPE milvus_querynode_disk_cache_evict_global_duration histogram +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="1"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="100"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="500"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="1000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="5000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="10000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="20000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="50000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="100000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="250000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="500000"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="1e+06"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="3.6e+06"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="5e+06"} 0 +milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="1e+07"} 0 
+milvus_querynode_disk_cache_evict_global_duration_bucket{node_id="37",le="+Inf"} 0 +milvus_querynode_disk_cache_evict_global_duration_sum{node_id="37"} 0 +milvus_querynode_disk_cache_evict_global_duration_count{node_id="37"} 0 +# HELP milvus_querynode_disk_cache_evict_total number of segments evicted from disk cache +# TYPE milvus_querynode_disk_cache_evict_total counter +milvus_querynode_disk_cache_evict_total{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_cache_load_bytes number of bytes loaded from disk cache +# TYPE milvus_querynode_disk_cache_load_bytes counter +milvus_querynode_disk_cache_load_bytes{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_cache_load_duration total time cost of loading segments from disk cache +# TYPE milvus_querynode_disk_cache_load_duration counter +milvus_querynode_disk_cache_load_duration{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_cache_load_global_duration global duration of loading segments from disk cache +# TYPE milvus_querynode_disk_cache_load_global_duration histogram +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="1"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="100"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="500"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="1000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="5000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="10000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="20000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="50000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="100000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="250000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="500000"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="1e+06"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="3.6e+06"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="5e+06"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="1e+07"} 0 +milvus_querynode_disk_cache_load_global_duration_bucket{node_id="37",le="+Inf"} 0 +milvus_querynode_disk_cache_load_global_duration_sum{node_id="37"} 0 +milvus_querynode_disk_cache_load_global_duration_count{node_id="37"} 0 +# HELP milvus_querynode_disk_cache_load_total number of segments loaded from disk cache +# TYPE milvus_querynode_disk_cache_load_total counter +milvus_querynode_disk_cache_load_total{db_name="default",node_id="37",rg="__default_resource_group"} 0 +# HELP milvus_querynode_disk_used_size disk used size(MB) +# TYPE milvus_querynode_disk_used_size gauge +milvus_querynode_disk_used_size{node_id="37"} 4.285002708435059 +# HELP milvus_querynode_dml_vchannel_num number of dmlChannels watched +# TYPE milvus_querynode_dml_vchannel_num gauge +milvus_querynode_dml_vchannel_num{node_id="37"} 0 +# HELP milvus_querynode_execute_bytes_counter +# TYPE milvus_querynode_execute_bytes_counter counter +milvus_querynode_execute_bytes_counter{msg_type="query",node_id="37"} 818 +milvus_querynode_execute_bytes_counter{msg_type="search",node_id="37"} 599 +# HELP milvus_querynode_flowgraph_num number of flowgraphs +# 
+# TYPE milvus_querynode_flowgraph_num gauge
+milvus_querynode_flowgraph_num{node_id="37"} 0
+# HELP milvus_querynode_forward_delete_latency forward delete cost in ms
+# TYPE milvus_querynode_forward_delete_latency histogram
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="1"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="2"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="4"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="8"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="16"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="32"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="64"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="128"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="256"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="512"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="1024"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="2048"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="4096"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="8192"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="16384"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="32768"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="65536"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="131072"} 1
+milvus_querynode_forward_delete_latency_bucket{function_name="ProcessDelete",node_id="37",le="+Inf"} 1
+milvus_querynode_forward_delete_latency_sum{function_name="ProcessDelete",node_id="37"} 1
+milvus_querynode_forward_delete_latency_count{function_name="ProcessDelete",node_id="37"} 1
+# HELP milvus_querynode_load_index_latency latency of load per segment's index, in milliseconds
+# TYPE milvus_querynode_load_index_latency histogram
+milvus_querynode_load_index_latency_bucket{node_id="37",le="1"} 0
+milvus_querynode_load_index_latency_bucket{node_id="37",le="100"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="500"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="1000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="5000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="10000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="20000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="50000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="100000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="250000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="500000"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="1e+06"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="3.6e+06"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="5e+06"} 1
+milvus_querynode_load_index_latency_bucket{node_id="37",le="1e+07"} 1 +milvus_querynode_load_index_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_querynode_load_index_latency_sum{node_id="37"} 2 +milvus_querynode_load_index_latency_count{node_id="37"} 1 +# HELP milvus_querynode_load_segment_concurrency number of concurrent loading segments in QueryNode +# TYPE milvus_querynode_load_segment_concurrency gauge +milvus_querynode_load_segment_concurrency{load_type="LoadSegment",node_id="37"} 0 +# HELP milvus_querynode_load_segment_latency latency of load per segment +# TYPE milvus_querynode_load_segment_latency histogram +milvus_querynode_load_segment_latency_bucket{node_id="37",le="1"} 0 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="100"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="500"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="1000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="5000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="10000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="20000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="50000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="100000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="250000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="500000"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="1e+06"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="3.6e+06"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="5e+06"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="1e+07"} 1 +milvus_querynode_load_segment_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_querynode_load_segment_latency_sum{node_id="37"} 9 +milvus_querynode_load_segment_latency_count{node_id="37"} 1 +# HELP milvus_querynode_process_insert_or_delete_latency process insert or delete cost in ms +# TYPE milvus_querynode_process_insert_or_delete_latency histogram +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="1"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="2"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="4"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="8"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="16"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="32"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="64"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="128"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="256"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="512"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="1024"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="2048"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="4096"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="8192"} 1 
+milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="16384"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="32768"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="65536"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="131072"} 1 +milvus_querynode_process_insert_or_delete_latency_bucket{msg_type="delete",node_id="37",le="+Inf"} 1 +milvus_querynode_process_insert_or_delete_latency_sum{msg_type="delete",node_id="37"} 1 +milvus_querynode_process_insert_or_delete_latency_count{msg_type="delete",node_id="37"} 1 +# HELP milvus_querynode_read_task_concurrency number of concurrent executing read tasks in QueryNode +# TYPE milvus_querynode_read_task_concurrency gauge +milvus_querynode_read_task_concurrency{node_id="37"} 0 +# HELP milvus_querynode_read_task_ready_len number of ready read tasks in readyQueue +# TYPE milvus_querynode_read_task_ready_len gauge +milvus_querynode_read_task_ready_len{node_id="37"} 0 +# HELP milvus_querynode_read_task_unsolved_len number of unsolved read tasks in unsolvedQueue +# TYPE milvus_querynode_read_task_unsolved_len gauge +milvus_querynode_read_task_unsolved_len{node_id="37"} 0 +# HELP milvus_querynode_search_group_nq the number of queries of each grouped search task +# TYPE milvus_querynode_search_group_nq histogram +milvus_querynode_search_group_nq_bucket{node_id="37",le="1"} 0 +milvus_querynode_search_group_nq_bucket{node_id="37",le="2"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="4"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="8"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="16"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="32"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="64"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="128"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="256"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="512"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="1024"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="2048"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="4096"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="8192"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="16384"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="32768"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="65536"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="131072"} 2 +milvus_querynode_search_group_nq_bucket{node_id="37",le="+Inf"} 2 +milvus_querynode_search_group_nq_sum{node_id="37"} 4 +milvus_querynode_search_group_nq_count{node_id="37"} 2 +# HELP milvus_querynode_search_group_size the number of tasks of each grouped search task +# TYPE milvus_querynode_search_group_size histogram +milvus_querynode_search_group_size_bucket{node_id="37",le="1"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="2"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="4"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="8"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="16"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="32"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="64"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="128"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="256"} 
2 +milvus_querynode_search_group_size_bucket{node_id="37",le="512"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="1024"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="2048"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="4096"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="8192"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="16384"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="32768"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="65536"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="131072"} 2 +milvus_querynode_search_group_size_bucket{node_id="37",le="+Inf"} 2 +milvus_querynode_search_group_size_sum{node_id="37"} 2 +milvus_querynode_search_group_size_count{node_id="37"} 2 +# HELP milvus_querynode_search_group_topk the topK of each grouped search task +# TYPE milvus_querynode_search_group_topk histogram +milvus_querynode_search_group_topk_bucket{node_id="37",le="1"} 0 +milvus_querynode_search_group_topk_bucket{node_id="37",le="2"} 0 +milvus_querynode_search_group_topk_bucket{node_id="37",le="4"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="8"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="16"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="32"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="64"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="128"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="256"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="512"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="1024"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="2048"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="4096"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="8192"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="16384"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="32768"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="65536"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="131072"} 2 +milvus_querynode_search_group_topk_bucket{node_id="37",le="+Inf"} 2 +milvus_querynode_search_group_topk_sum{node_id="37"} 6 +milvus_querynode_search_group_topk_count{node_id="37"} 2 +# HELP milvus_querynode_search_nq the number of queries of each search task +# TYPE milvus_querynode_search_nq histogram +milvus_querynode_search_nq_bucket{node_id="37",le="1"} 0 +milvus_querynode_search_nq_bucket{node_id="37",le="2"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="4"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="8"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="16"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="32"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="64"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="128"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="256"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="512"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="1024"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="2048"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="4096"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="8192"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="16384"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="32768"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="65536"} 2 +milvus_querynode_search_nq_bucket{node_id="37",le="131072"} 
2 +milvus_querynode_search_nq_bucket{node_id="37",le="+Inf"} 2 +milvus_querynode_search_nq_sum{node_id="37"} 4 +milvus_querynode_search_nq_count{node_id="37"} 2 +# HELP milvus_querynode_search_topk the top of each search task +# TYPE milvus_querynode_search_topk histogram +milvus_querynode_search_topk_bucket{node_id="37",le="1"} 0 +milvus_querynode_search_topk_bucket{node_id="37",le="2"} 0 +milvus_querynode_search_topk_bucket{node_id="37",le="4"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="8"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="16"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="32"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="64"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="128"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="256"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="512"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="1024"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="2048"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="4096"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="8192"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="16384"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="32768"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="65536"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="131072"} 2 +milvus_querynode_search_topk_bucket{node_id="37",le="+Inf"} 2 +milvus_querynode_search_topk_sum{node_id="37"} 6 +milvus_querynode_search_topk_count{node_id="37"} 2 +# HELP milvus_querynode_segment_access_duration total time cost of accessing segments +# TYPE milvus_querynode_segment_access_duration counter +milvus_querynode_segment_access_duration{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 11.942583 +milvus_querynode_segment_access_duration{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 4.55725 +# HELP milvus_querynode_segment_access_global_duration global time cost of accessing segments +# TYPE milvus_querynode_segment_access_global_duration histogram +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="1"} 0 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="100"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="500"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="1000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="5000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="10000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="20000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="50000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="100000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="250000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="500000"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="1e+06"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="3.6e+06"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="5e+06"} 5 
+milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="1e+07"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_querynode_segment_access_global_duration_sum{node_id="37",query_type="query"} 11.942583 +milvus_querynode_segment_access_global_duration_count{node_id="37",query_type="query"} 5 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="1"} 0 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="100"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="500"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="1000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="5000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="10000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="20000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="50000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="100000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="250000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="500000"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="1e+06"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="3.6e+06"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="5e+06"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="1e+07"} 2 +milvus_querynode_segment_access_global_duration_bucket{node_id="37",query_type="search",le="+Inf"} 2 +milvus_querynode_segment_access_global_duration_sum{node_id="37",query_type="search"} 4.55725 +milvus_querynode_segment_access_global_duration_count{node_id="37",query_type="search"} 2 +# HELP milvus_querynode_segment_access_total number of segments accessed +# TYPE milvus_querynode_segment_access_total counter +milvus_querynode_segment_access_total{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 5 +milvus_querynode_segment_access_total{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 2 +# HELP milvus_querynode_segment_access_wait_cache_duration total time cost of waiting for loading access +# TYPE milvus_querynode_segment_access_wait_cache_duration counter +milvus_querynode_segment_access_wait_cache_duration{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 0 +milvus_querynode_segment_access_wait_cache_duration{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 0 +# HELP milvus_querynode_segment_access_wait_cache_global_duration global time cost of waiting for loading access +# TYPE milvus_querynode_segment_access_wait_cache_global_duration histogram +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="1"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="100"} 0 
+milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="500"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="1000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="5000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="10000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="20000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="50000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="100000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="250000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="500000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="1e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="3.6e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="5e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="1e+07"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="query",le="+Inf"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_sum{node_id="37",query_type="query"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_count{node_id="37",query_type="query"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="1"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="100"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="500"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="1000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="5000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="10000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="20000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="50000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="100000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="250000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="500000"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="1e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="3.6e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="5e+06"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="1e+07"} 0 
+milvus_querynode_segment_access_wait_cache_global_duration_bucket{node_id="37",query_type="search",le="+Inf"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_sum{node_id="37",query_type="search"} 0 +milvus_querynode_segment_access_wait_cache_global_duration_count{node_id="37",query_type="search"} 0 +# HELP milvus_querynode_segment_access_wait_cache_total number of segments waiting for loading access +# TYPE milvus_querynode_segment_access_wait_cache_total counter +milvus_querynode_segment_access_wait_cache_total{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 0 +milvus_querynode_segment_access_wait_cache_total{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 0 +# HELP milvus_querynode_segment_latency_per_vector one vector's search latency per segment +# TYPE milvus_querynode_segment_latency_per_vector histogram +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="1"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="2"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="4"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="8"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="16"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="32"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="64"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="128"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="256"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="512"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="1024"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="2048"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="4096"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="8192"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="16384"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="32768"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="65536"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="131072"} 2 +milvus_querynode_segment_latency_per_vector_bucket{node_id="37",query_type="search",segment_state="Sealed",le="+Inf"} 2 +milvus_querynode_segment_latency_per_vector_sum{node_id="37",query_type="search",segment_state="Sealed"} 1.5 +milvus_querynode_segment_latency_per_vector_count{node_id="37",query_type="search",segment_state="Sealed"} 2 +# HELP milvus_querynode_sq_core_latency latency of search or query latency in segcore +# TYPE milvus_querynode_sq_core_latency histogram 
+milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="1"} 4 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="2"} 4 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="4"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="8"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="16"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="32"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="64"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="128"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="256"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="512"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="1024"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="2048"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="4096"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="8192"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="16384"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="32768"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="65536"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="131072"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_querynode_sq_core_latency_sum{node_id="37",query_type="query"} 7 +milvus_querynode_sq_core_latency_count{node_id="37",query_type="query"} 5 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="1"} 1 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="2"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="4"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="8"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="16"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="32"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="64"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="128"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="256"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="512"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="1024"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="2048"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="4096"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="8192"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="16384"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="32768"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="65536"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="131072"} 2 +milvus_querynode_sq_core_latency_bucket{node_id="37",query_type="search",le="+Inf"} 2 +milvus_querynode_sq_core_latency_sum{node_id="37",query_type="search"} 3 +milvus_querynode_sq_core_latency_count{node_id="37",query_type="search"} 2 
+# HELP milvus_querynode_sq_queue_latency latency of search or query in queue +# TYPE milvus_querynode_sq_queue_latency histogram +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="1"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="2"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="4"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="8"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="16"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="32"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="64"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="128"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="256"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="512"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="1024"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="2048"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="4096"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="8192"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="16384"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="32768"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="65536"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="131072"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="query",rg="__default_resource_group",le="+Inf"} 5 +milvus_querynode_sq_queue_latency_sum{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 0.39716599999999996 +milvus_querynode_sq_queue_latency_count{db_name="default",node_id="37",query_type="query",rg="__default_resource_group"} 5 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="1"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="2"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="4"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="8"} 2 
+milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="16"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="32"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="64"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="128"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="256"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="512"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="1024"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="2048"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="4096"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="8192"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="16384"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="32768"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="65536"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="131072"} 2 +milvus_querynode_sq_queue_latency_bucket{db_name="default",node_id="37",query_type="search",rg="__default_resource_group",le="+Inf"} 2 +milvus_querynode_sq_queue_latency_sum{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 0.12625 +milvus_querynode_sq_queue_latency_count{db_name="default",node_id="37",query_type="search",rg="__default_resource_group"} 2 +# HELP milvus_querynode_sq_queue_user_latency latency per user of search or query in queue +# TYPE milvus_querynode_sq_queue_user_latency histogram +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="1"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="2"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="4"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="8"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="16"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="32"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="64"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="128"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="256"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="512"} 5 
+milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="1024"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="2048"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="4096"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="8192"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="16384"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="32768"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="65536"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="131072"} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="query",username="",le="+Inf"} 5 +milvus_querynode_sq_queue_user_latency_sum{node_id="37",query_type="query",username=""} 0.39716599999999996 +milvus_querynode_sq_queue_user_latency_count{node_id="37",query_type="query",username=""} 5 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="1"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="2"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="4"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="8"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="16"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="32"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="64"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="128"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="256"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="512"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="1024"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="2048"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="4096"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="8192"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="16384"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="32768"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="65536"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="131072"} 2 +milvus_querynode_sq_queue_user_latency_bucket{node_id="37",query_type="search",username="",le="+Inf"} 2 +milvus_querynode_sq_queue_user_latency_sum{node_id="37",query_type="search",username=""} 0.12625 +milvus_querynode_sq_queue_user_latency_count{node_id="37",query_type="search",username=""} 2 +# HELP milvus_querynode_sq_reduce_latency latency of reduce search or query result +# TYPE milvus_querynode_sq_reduce_latency histogram 
+milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="1"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="2"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="4"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="8"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="16"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="32"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="64"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="128"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="256"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="512"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="1024"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="2048"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="4096"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="8192"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="16384"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="32768"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="65536"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="131072"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce",le="+Inf"} 5 +milvus_querynode_sq_reduce_latency_sum{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce"} 1 +milvus_querynode_sq_reduce_latency_count{node_id="37",query_type="query",reduce_level="segments",reduce_type="batch_reduce"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="1"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="2"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="4"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="8"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="16"} 5 
+milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="32"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="64"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="128"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="256"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="512"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="1024"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="2048"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="4096"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="8192"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="16384"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="32768"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="65536"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="131072"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce",le="+Inf"} 5 +milvus_querynode_sq_reduce_latency_sum{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce"} 0 +milvus_querynode_sq_reduce_latency_count{node_id="37",query_type="query",reduce_level="shards",reduce_type="batch_reduce"} 5 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="1"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="2"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="4"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="8"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="16"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="32"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="64"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="128"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="256"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="512"} 2 
+milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="1024"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="2048"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="4096"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="8192"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="16384"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="32768"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="65536"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="131072"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce",le="+Inf"} 2 +milvus_querynode_sq_reduce_latency_sum{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce"} 0 +milvus_querynode_sq_reduce_latency_count{node_id="37",query_type="search",reduce_level="segments",reduce_type="batch_reduce"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="1"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="2"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="4"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="8"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="16"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="32"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="64"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="128"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="256"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="512"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="1024"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="2048"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="4096"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="8192"} 2 
+milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="16384"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="32768"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="65536"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="131072"} 2 +milvus_querynode_sq_reduce_latency_bucket{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce",le="+Inf"} 2 +milvus_querynode_sq_reduce_latency_sum{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce"} 0 +milvus_querynode_sq_reduce_latency_count{node_id="37",query_type="search",reduce_level="shards",reduce_type="batch_reduce"} 2 +# HELP milvus_querynode_sq_req_latency latency of Search or query requests +# TYPE milvus_querynode_sq_req_latency histogram +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="1"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="2"} 4 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="4"} 4 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="8"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="16"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="32"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="64"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="128"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="256"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="512"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="1024"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="2048"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="4096"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="8192"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="16384"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="32768"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="65536"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="131072"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="FromLeader",le="+Inf"} 5 +milvus_querynode_sq_req_latency_sum{node_id="37",query_type="query",scope="FromLeader"} 14 +milvus_querynode_sq_req_latency_count{node_id="37",query_type="query",scope="FromLeader"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="1"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="2"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="4"} 0 
+milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="8"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="16"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="32"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="64"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="128"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="256"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="512"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="1024"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="2048"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="4096"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="8192"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="16384"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="32768"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="65536"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="131072"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="query",scope="OnLeader",le="+Inf"} 5 +milvus_querynode_sq_req_latency_sum{node_id="37",query_type="query",scope="OnLeader"} 1469 +milvus_querynode_sq_req_latency_count{node_id="37",query_type="query",scope="OnLeader"} 5 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="1"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="2"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="4"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="8"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="16"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="32"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="64"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="128"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="256"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="512"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="1024"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="2048"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="4096"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="8192"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="16384"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="32768"} 2 
+milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="65536"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="131072"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="FromLeader",le="+Inf"} 2 +milvus_querynode_sq_req_latency_sum{node_id="37",query_type="search",scope="FromLeader"} 6 +milvus_querynode_sq_req_latency_count{node_id="37",query_type="search",scope="FromLeader"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="1"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="2"} 0 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="4"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="8"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="16"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="32"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="64"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="128"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="256"} 1 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="512"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="1024"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="2048"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="4096"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="8192"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="16384"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="32768"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="65536"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="131072"} 2 +milvus_querynode_sq_req_latency_bucket{node_id="37",query_type="search",scope="OnLeader",le="+Inf"} 2 +milvus_querynode_sq_req_latency_sum{node_id="37",query_type="search",scope="OnLeader"} 405 +milvus_querynode_sq_req_latency_count{node_id="37",query_type="search",scope="OnLeader"} 2 +# HELP milvus_querynode_sq_segment_latency latency of search or query per segment +# TYPE milvus_querynode_sq_segment_latency histogram +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="1"} 3 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="2"} 4 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="4"} 4 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="8"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="16"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="32"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="64"} 5 
+milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="128"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="256"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="512"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="1024"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="2048"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="4096"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="8192"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="16384"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="32768"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="65536"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="131072"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="query",segment_state="Sealed",le="+Inf"} 5 +milvus_querynode_sq_segment_latency_sum{node_id="37",query_type="query",segment_state="Sealed"} 10 +milvus_querynode_sq_segment_latency_count{node_id="37",query_type="query",segment_state="Sealed"} 5 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="1"} 1 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="2"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="4"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="8"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="16"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="32"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="64"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="128"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="256"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="512"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="1024"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="2048"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="4096"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="8192"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="16384"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="32768"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="65536"} 2 
+milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="131072"} 2 +milvus_querynode_sq_segment_latency_bucket{node_id="37",query_type="search",segment_state="Sealed",le="+Inf"} 2 +milvus_querynode_sq_segment_latency_sum{node_id="37",query_type="search",segment_state="Sealed"} 3 +milvus_querynode_sq_segment_latency_count{node_id="37",query_type="search",segment_state="Sealed"} 2 +# HELP milvus_querynode_sq_wait_tsafe_latency latency of search or query to wait for tsafe +# TYPE milvus_querynode_sq_wait_tsafe_latency histogram +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="1"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="2"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="4"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="8"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="16"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="32"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="64"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="128"} 0 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="256"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="512"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="1024"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="2048"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="4096"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="8192"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="16384"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="32768"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="65536"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="131072"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="query",le="+Inf"} 5 +milvus_querynode_sq_wait_tsafe_latency_sum{node_id="37",query_type="query"} 1449 +milvus_querynode_sq_wait_tsafe_latency_count{node_id="37",query_type="query"} 5 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="1"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="2"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="4"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="8"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="16"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="32"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="64"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="128"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="256"} 1 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="512"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="1024"} 2 
+milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="2048"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="4096"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="8192"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="16384"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="32768"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="65536"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="131072"} 2 +milvus_querynode_sq_wait_tsafe_latency_bucket{node_id="37",query_type="search",le="+Inf"} 2 +milvus_querynode_sq_wait_tsafe_latency_sum{node_id="37",query_type="search"} 398 +milvus_querynode_sq_wait_tsafe_latency_count{node_id="37",query_type="search"} 2 +# HELP milvus_querynode_wait_processing_msg_count count of wait processing msg +# TYPE milvus_querynode_wait_processing_msg_count gauge +milvus_querynode_wait_processing_msg_count{msg_type="delete",node_id="37"} 0 +milvus_querynode_wait_processing_msg_count{msg_type="insert",node_id="37"} 0 +# HELP milvus_querynode_watch_dml_channel_latency latency of watch dml channel +# TYPE milvus_querynode_watch_dml_channel_latency histogram +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="1"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="2"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="4"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="8"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="16"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="32"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="64"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="128"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="256"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="512"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="1024"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="2048"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="4096"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="8192"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="16384"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="32768"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="65536"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="131072"} 1 +milvus_querynode_watch_dml_channel_latency_bucket{node_id="37",le="+Inf"} 1 +milvus_querynode_watch_dml_channel_latency_sum{node_id="37"} 0 +milvus_querynode_watch_dml_channel_latency_count{node_id="37"} 1 +# HELP milvus_rootcoord_collection_num number of collections +# TYPE milvus_rootcoord_collection_num gauge +milvus_rootcoord_collection_num{db_name="default"} 0 +# HELP milvus_rootcoord_credential_num number of credentials +# TYPE milvus_rootcoord_credential_num gauge +milvus_rootcoord_credential_num 0 +# HELP milvus_rootcoord_ddl_req_count count of DDL operations +# TYPE milvus_rootcoord_ddl_req_count counter +milvus_rootcoord_ddl_req_count{function_name="CreateCollection",status="success"} 1 +milvus_rootcoord_ddl_req_count{function_name="CreateCollection",status="total"} 1 
+milvus_rootcoord_ddl_req_count{function_name="DescribeCollection",status="fail"} 3 +milvus_rootcoord_ddl_req_count{function_name="DescribeCollection",status="success"} 12 +milvus_rootcoord_ddl_req_count{function_name="DescribeCollection",status="total"} 15 +milvus_rootcoord_ddl_req_count{function_name="DescribeDatabase",status="success"} 3 +milvus_rootcoord_ddl_req_count{function_name="DescribeDatabase",status="total"} 3 +milvus_rootcoord_ddl_req_count{function_name="DropCollection",status="success"} 1 +milvus_rootcoord_ddl_req_count{function_name="DropCollection",status="total"} 1 +milvus_rootcoord_ddl_req_count{function_name="ListDatabases",status="success"} 2 +milvus_rootcoord_ddl_req_count{function_name="ListDatabases",status="total"} 2 +milvus_rootcoord_ddl_req_count{function_name="PolicyList",status="success"} 1 +milvus_rootcoord_ddl_req_count{function_name="PolicyList",status="total"} 1 +milvus_rootcoord_ddl_req_count{function_name="ShowCollections",status="success"} 1 +milvus_rootcoord_ddl_req_count{function_name="ShowCollections",status="total"} 1 +milvus_rootcoord_ddl_req_count{function_name="ShowPartitions",status="success"} 4 +milvus_rootcoord_ddl_req_count{function_name="ShowPartitions",status="total"} 4 +# HELP milvus_rootcoord_ddl_req_latency latency of each DDL operations +# TYPE milvus_rootcoord_ddl_req_latency histogram +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="1"} 0 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="2"} 0 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="4"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="8"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="16"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="32"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="64"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="128"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="256"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="512"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="1024"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="2048"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="4096"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="8192"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="16384"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="32768"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="65536"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="131072"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="CreateCollection",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_sum{function_name="CreateCollection"} 4 +milvus_rootcoord_ddl_req_latency_count{function_name="CreateCollection"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="1"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="2"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="4"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="8"} 12 
+milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="16"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="32"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="64"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="128"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="256"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="512"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="1024"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="2048"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="4096"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="8192"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="16384"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="32768"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="65536"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="131072"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeCollection",le="+Inf"} 12 +milvus_rootcoord_ddl_req_latency_sum{function_name="DescribeCollection"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="DescribeCollection"} 12 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="1"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="2"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="4"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="8"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="16"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="32"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="64"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="128"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="256"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="512"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="1024"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="2048"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="4096"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="8192"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="16384"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="32768"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="65536"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="131072"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DescribeDatabase",le="+Inf"} 3 +milvus_rootcoord_ddl_req_latency_sum{function_name="DescribeDatabase"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="DescribeDatabase"} 3 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="1"} 0 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="2"} 0 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="4"} 1 
+milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="8"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="16"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="32"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="64"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="128"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="256"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="512"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="1024"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="2048"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="4096"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="8192"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="16384"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="32768"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="65536"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="131072"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="DropCollection",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_sum{function_name="DropCollection"} 4 +milvus_rootcoord_ddl_req_latency_count{function_name="DropCollection"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="1"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="2"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="4"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="8"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="16"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="32"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="64"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="128"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="256"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="512"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="1024"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="2048"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="4096"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="8192"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="16384"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="32768"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="65536"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="131072"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ListDatabases",le="+Inf"} 2 +milvus_rootcoord_ddl_req_latency_sum{function_name="ListDatabases"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="ListDatabases"} 2 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="1"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="2"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="4"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="8"} 1 
+milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="16"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="32"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="64"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="128"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="256"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="512"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="1024"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="2048"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="4096"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="8192"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="16384"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="32768"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="65536"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="131072"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="PolicyList",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_sum{function_name="PolicyList"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="PolicyList"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="1"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="2"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="4"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="8"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="16"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="32"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="64"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="128"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="256"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="512"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="1024"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="2048"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="4096"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="8192"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="16384"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="32768"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="65536"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="131072"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowCollections",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_sum{function_name="ShowCollections"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="ShowCollections"} 1 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="1"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="2"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="4"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="8"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="16"} 4 
+milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="32"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="64"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="128"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="256"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="512"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="1024"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="2048"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="4096"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="8192"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="16384"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="32768"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="65536"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="131072"} 4 +milvus_rootcoord_ddl_req_latency_bucket{function_name="ShowPartitions",le="+Inf"} 4 +milvus_rootcoord_ddl_req_latency_sum{function_name="ShowPartitions"} 0 +milvus_rootcoord_ddl_req_latency_count{function_name="ShowPartitions"} 4 +# HELP milvus_rootcoord_ddl_req_latency_in_queue latency of each DDL operations in queue +# TYPE milvus_rootcoord_ddl_req_latency_in_queue histogram +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.005"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.01"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.025"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.05"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.25"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="0.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="2.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="10"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="CreateCollection",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_sum{function_name="CreateCollection"} 0 +milvus_rootcoord_ddl_req_latency_in_queue_count{function_name="CreateCollection"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.005"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.01"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.025"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.05"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.1"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.25"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="0.5"} 12 
+milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="1"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="2.5"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="5"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="10"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DescribeCollection",le="+Inf"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_sum{function_name="DescribeCollection"} 0 +milvus_rootcoord_ddl_req_latency_in_queue_count{function_name="DescribeCollection"} 12 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.005"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.01"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.025"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.05"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.25"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="0.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="2.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="10"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="DropCollection",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_sum{function_name="DropCollection"} 0 +milvus_rootcoord_ddl_req_latency_in_queue_count{function_name="DropCollection"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.005"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.01"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.025"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.05"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.25"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="0.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="1"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="2.5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="5"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="10"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowCollections",le="+Inf"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_sum{function_name="ShowCollections"} 0 +milvus_rootcoord_ddl_req_latency_in_queue_count{function_name="ShowCollections"} 1 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.005"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.01"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.025"} 4 
+milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.05"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.1"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.25"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="0.5"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="1"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="2.5"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="5"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="10"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_bucket{function_name="ShowPartitions",le="+Inf"} 4 +milvus_rootcoord_ddl_req_latency_in_queue_sum{function_name="ShowPartitions"} 0 +milvus_rootcoord_ddl_req_latency_in_queue_count{function_name="ShowPartitions"} 4 +# HELP milvus_rootcoord_dml_channel_num number of DML channels +# TYPE milvus_rootcoord_dml_channel_num gauge +milvus_rootcoord_dml_channel_num 16 +# HELP milvus_rootcoord_entity_num number of entities, clustered by collection and their status(loaded/total) +# TYPE milvus_rootcoord_entity_num gauge +milvus_rootcoord_entity_num{collection_name="hello_milvus",status="loaded"} 3001 +milvus_rootcoord_entity_num{collection_name="hello_milvus",status="total"} 3001 +# HELP milvus_rootcoord_force_deny_writing_counter The number of times milvus turns into force-deny-writing states +# TYPE milvus_rootcoord_force_deny_writing_counter counter +milvus_rootcoord_force_deny_writing_counter 0 +# HELP milvus_rootcoord_id_alloc_count count of ID allocated +# TYPE milvus_rootcoord_id_alloc_count counter +milvus_rootcoord_id_alloc_count 400508 +# HELP milvus_rootcoord_indexed_entity_num indexed number of entities, clustered by collection, index name and whether it's a vector index +# TYPE milvus_rootcoord_indexed_entity_num gauge +milvus_rootcoord_indexed_entity_num{collection_name="hello_milvus",index_name="embeddings",is_vector_index="true"} 3001 +# HELP milvus_rootcoord_msgstream_obj_num number of message streams +# TYPE milvus_rootcoord_msgstream_obj_num gauge +milvus_rootcoord_msgstream_obj_num 16 +# HELP milvus_rootcoord_num_of_roles The number of roles +# TYPE milvus_rootcoord_num_of_roles gauge +milvus_rootcoord_num_of_roles 0 +# HELP milvus_rootcoord_partition_num number of partitions +# TYPE milvus_rootcoord_partition_num gauge +milvus_rootcoord_partition_num 0 +# HELP milvus_rootcoord_produce_tt_lag_ms now time minus tt per physical channel +# TYPE milvus_rootcoord_produce_tt_lag_ms gauge +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_0"} 96 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_1"} 97 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_10"} 137 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_2"} 87 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_3"} 94 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_4"} 96 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_5"} 96 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_6"} 95 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_7"} 97 +milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_8"} 97 
+milvus_rootcoord_produce_tt_lag_ms{channel_name="by-dev-rootcoord-dml_9"} 97 +# HELP milvus_rootcoord_proxy_num number of proxy nodes managered by rootcoord +# TYPE milvus_rootcoord_proxy_num gauge +milvus_rootcoord_proxy_num 1 +# HELP milvus_rootcoord_qn_mem_high_water_level querynode memory high water level +# TYPE milvus_rootcoord_qn_mem_high_water_level gauge +milvus_rootcoord_qn_mem_high_water_level 0 +# HELP milvus_rootcoord_sync_timetick_latency latency of synchronizing timetick message +# TYPE milvus_rootcoord_sync_timetick_latency histogram +milvus_rootcoord_sync_timetick_latency_bucket{le="1"} 42 +milvus_rootcoord_sync_timetick_latency_bucket{le="2"} 65 +milvus_rootcoord_sync_timetick_latency_bucket{le="4"} 181 +milvus_rootcoord_sync_timetick_latency_bucket{le="8"} 310 +milvus_rootcoord_sync_timetick_latency_bucket{le="16"} 349 +milvus_rootcoord_sync_timetick_latency_bucket{le="32"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="64"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="128"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="256"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="512"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="1024"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="2048"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="4096"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="8192"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="16384"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="32768"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="65536"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="131072"} 353 +milvus_rootcoord_sync_timetick_latency_bucket{le="+Inf"} 353 +milvus_rootcoord_sync_timetick_latency_sum 1756 +milvus_rootcoord_sync_timetick_latency_count 353 +# HELP milvus_rootcoord_timestamp lateste timestamp allocated in memory +# TYPE milvus_rootcoord_timestamp gauge +milvus_rootcoord_timestamp 4.548092784887726e+17 +# HELP milvus_rootcoord_timestamp_saved timestamp saved in meta storage +# TYPE milvus_rootcoord_timestamp_saved gauge +milvus_rootcoord_timestamp_saved 1.734959711e+09 +# HELP milvus_runtime_info Runtime information of milvus +# TYPE milvus_runtime_info gauge +milvus_runtime_info{meta="etcd",mq="rocksmq"} 1 +# HELP milvus_thread_num the actual thread number of milvus process +# TYPE milvus_thread_num gauge +milvus_thread_num 73 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 8.06 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 71 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.58592e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.73495960802e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 8.522620928e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP ann_iterator_init_latency ann iterator init latency (ms) +# TYPE ann_iterator_init_latency histogram +ann_iterator_init_latency_bucket{module="cardinal",le="1"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="2"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="4"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="8"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="16"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="32"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="64"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="128"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="256"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="512"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="1024"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="2048"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="4096"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="8192"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="16384"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="32768"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="65536"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +ann_iterator_init_latency_bucket{module="cardinal",le="+Inf"} 0 +ann_iterator_init_latency_sum{module="cardinal"} 0 +ann_iterator_init_latency_count{module="cardinal"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="1"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="2"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="4"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="8"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="16"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="32"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="64"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="128"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="256"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="512"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="1024"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="2048"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="4096"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="8192"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="16384"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="32768"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="65536"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="1.048576e+06"} 0 +ann_iterator_init_latency_bucket{module="knowhere",le="+Inf"} 0 +ann_iterator_init_latency_sum{module="knowhere"} 0 +ann_iterator_init_latency_count{module="knowhere"} 0 +# HELP bf_search_cnt number of bf search per request +# TYPE bf_search_cnt histogram +bf_search_cnt_bucket{module="cardinal",le="1"} 0 +bf_search_cnt_bucket{module="cardinal",le="2"} 0 +bf_search_cnt_bucket{module="cardinal",le="4"} 0 +bf_search_cnt_bucket{module="cardinal",le="8"} 0 +bf_search_cnt_bucket{module="cardinal",le="16"} 0 +bf_search_cnt_bucket{module="cardinal",le="32"} 0 +bf_search_cnt_bucket{module="cardinal",le="64"} 0 +bf_search_cnt_bucket{module="cardinal",le="128"} 0 +bf_search_cnt_bucket{module="cardinal",le="256"} 0 +bf_search_cnt_bucket{module="cardinal",le="512"} 0 +bf_search_cnt_bucket{module="cardinal",le="1024"} 0 
+bf_search_cnt_bucket{module="cardinal",le="2048"} 0 +bf_search_cnt_bucket{module="cardinal",le="4096"} 0 +bf_search_cnt_bucket{module="cardinal",le="8192"} 0 +bf_search_cnt_bucket{module="cardinal",le="16384"} 0 +bf_search_cnt_bucket{module="cardinal",le="32768"} 0 +bf_search_cnt_bucket{module="cardinal",le="65536"} 0 +bf_search_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +bf_search_cnt_bucket{module="cardinal",le="+Inf"} 0 +bf_search_cnt_sum{module="cardinal"} 0 +bf_search_cnt_count{module="cardinal"} 0 +# HELP bitset_ratio bitset ratio +# TYPE bitset_ratio histogram +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.05"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.1"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.15"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.2"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.25"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.3"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.35"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.4"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.45"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.5"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.55"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.6"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.65"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.7"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.75"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.8"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.85"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.9"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="0.95"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="1"} 0 +bitset_ratio_bucket{module="PROMETHEUS_LABEL_CARDINAL",le="+Inf"} 0 +bitset_ratio_sum{module="PROMETHEUS_LABEL_CARDINAL"} 0 +bitset_ratio_count{module="PROMETHEUS_LABEL_CARDINAL"} 0 +# HELP build_latency index build latency (s) +# TYPE build_latency histogram +build_latency_bucket{module="cardinal",le="1"} 0 +build_latency_bucket{module="cardinal",le="2"} 0 +build_latency_bucket{module="cardinal",le="4"} 0 +build_latency_bucket{module="cardinal",le="8"} 0 +build_latency_bucket{module="cardinal",le="16"} 0 +build_latency_bucket{module="cardinal",le="32"} 0 +build_latency_bucket{module="cardinal",le="64"} 0 +build_latency_bucket{module="cardinal",le="128"} 0 +build_latency_bucket{module="cardinal",le="256"} 0 +build_latency_bucket{module="cardinal",le="512"} 0 +build_latency_bucket{module="cardinal",le="1024"} 0 +build_latency_bucket{module="cardinal",le="2048"} 0 +build_latency_bucket{module="cardinal",le="4096"} 0 +build_latency_bucket{module="cardinal",le="8192"} 0 +build_latency_bucket{module="cardinal",le="16384"} 0 +build_latency_bucket{module="cardinal",le="32768"} 0 +build_latency_bucket{module="cardinal",le="65536"} 0 +build_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +build_latency_bucket{module="cardinal",le="+Inf"} 0 +build_latency_sum{module="cardinal"} 0 +build_latency_count{module="cardinal"} 0 +build_latency_bucket{module="knowhere",le="1"} 1 +build_latency_bucket{module="knowhere",le="2"} 1 +build_latency_bucket{module="knowhere",le="4"} 1 +build_latency_bucket{module="knowhere",le="8"} 
1 +build_latency_bucket{module="knowhere",le="16"} 1 +build_latency_bucket{module="knowhere",le="32"} 1 +build_latency_bucket{module="knowhere",le="64"} 1 +build_latency_bucket{module="knowhere",le="128"} 1 +build_latency_bucket{module="knowhere",le="256"} 1 +build_latency_bucket{module="knowhere",le="512"} 1 +build_latency_bucket{module="knowhere",le="1024"} 1 +build_latency_bucket{module="knowhere",le="2048"} 1 +build_latency_bucket{module="knowhere",le="4096"} 1 +build_latency_bucket{module="knowhere",le="8192"} 1 +build_latency_bucket{module="knowhere",le="16384"} 1 +build_latency_bucket{module="knowhere",le="32768"} 1 +build_latency_bucket{module="knowhere",le="65536"} 1 +build_latency_bucket{module="knowhere",le="1.048576e+06"} 1 +build_latency_bucket{module="knowhere",le="+Inf"} 1 +build_latency_sum{module="knowhere"} 0.015760917 +build_latency_count{module="knowhere"} 1 +# HELP cache_hit_cnt cache hit cnt per request +# TYPE cache_hit_cnt histogram +cache_hit_cnt_bucket{module="cardinal",le="1"} 0 +cache_hit_cnt_bucket{module="cardinal",le="2"} 0 +cache_hit_cnt_bucket{module="cardinal",le="4"} 0 +cache_hit_cnt_bucket{module="cardinal",le="8"} 0 +cache_hit_cnt_bucket{module="cardinal",le="16"} 0 +cache_hit_cnt_bucket{module="cardinal",le="32"} 0 +cache_hit_cnt_bucket{module="cardinal",le="64"} 0 +cache_hit_cnt_bucket{module="cardinal",le="128"} 0 +cache_hit_cnt_bucket{module="cardinal",le="256"} 0 +cache_hit_cnt_bucket{module="cardinal",le="512"} 0 +cache_hit_cnt_bucket{module="cardinal",le="1024"} 0 +cache_hit_cnt_bucket{module="cardinal",le="2048"} 0 +cache_hit_cnt_bucket{module="cardinal",le="4096"} 0 +cache_hit_cnt_bucket{module="cardinal",le="8192"} 0 +cache_hit_cnt_bucket{module="cardinal",le="16384"} 0 +cache_hit_cnt_bucket{module="cardinal",le="32768"} 0 +cache_hit_cnt_bucket{module="cardinal",le="65536"} 0 +cache_hit_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +cache_hit_cnt_bucket{module="cardinal",le="+Inf"} 0 +cache_hit_cnt_sum{module="cardinal"} 0 +cache_hit_cnt_count{module="cardinal"} 0 +# HELP diskann_bitset_ratio DISKANN bitset ratio for search and range search +# TYPE diskann_bitset_ratio histogram +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.05"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.1"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.15"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.2"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.25"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.3"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.35"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.4"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.45"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.5"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.55"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.6"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.65"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.7"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.75"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.8"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.85"} 0 
+diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.9"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.95"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="1"} 0 +diskann_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="+Inf"} 0 +diskann_bitset_ratio_sum{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +diskann_bitset_ratio_count{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +# HELP diskann_range_search_iters DISKANN range search iterations +# TYPE diskann_range_search_iters histogram +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="2"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="4"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="6"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="8"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="10"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="12"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="14"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="16"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="18"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="20"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="22"} 0 +diskann_range_search_iters_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="+Inf"} 0 +diskann_range_search_iters_sum{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +diskann_range_search_iters_count{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +# HELP diskann_search_hops DISKANN search hops +# TYPE diskann_search_hops histogram +diskann_search_hops_bucket{module="knowhere",le="1"} 0 +diskann_search_hops_bucket{module="knowhere",le="2"} 0 +diskann_search_hops_bucket{module="knowhere",le="4"} 0 +diskann_search_hops_bucket{module="knowhere",le="8"} 0 +diskann_search_hops_bucket{module="knowhere",le="16"} 0 +diskann_search_hops_bucket{module="knowhere",le="32"} 0 +diskann_search_hops_bucket{module="knowhere",le="64"} 0 +diskann_search_hops_bucket{module="knowhere",le="128"} 0 +diskann_search_hops_bucket{module="knowhere",le="256"} 0 +diskann_search_hops_bucket{module="knowhere",le="512"} 0 +diskann_search_hops_bucket{module="knowhere",le="1024"} 0 +diskann_search_hops_bucket{module="knowhere",le="2048"} 0 +diskann_search_hops_bucket{module="knowhere",le="4096"} 0 +diskann_search_hops_bucket{module="knowhere",le="8192"} 0 +diskann_search_hops_bucket{module="knowhere",le="16384"} 0 +diskann_search_hops_bucket{module="knowhere",le="32768"} 0 +diskann_search_hops_bucket{module="knowhere",le="65536"} 0 +diskann_search_hops_bucket{module="knowhere",le="1.048576e+06"} 0 +diskann_search_hops_bucket{module="knowhere",le="+Inf"} 0 +diskann_search_hops_sum{module="knowhere"} 0 +diskann_search_hops_count{module="knowhere"} 0 +# HELP exec_latency execute latency per request +# TYPE exec_latency histogram +exec_latency_bucket{module="cardinal",le="1"} 0 +exec_latency_bucket{module="cardinal",le="2"} 0 +exec_latency_bucket{module="cardinal",le="4"} 0 +exec_latency_bucket{module="cardinal",le="8"} 0 +exec_latency_bucket{module="cardinal",le="16"} 0 +exec_latency_bucket{module="cardinal",le="32"} 0 +exec_latency_bucket{module="cardinal",le="64"} 0 +exec_latency_bucket{module="cardinal",le="128"} 0 +exec_latency_bucket{module="cardinal",le="256"} 0 +exec_latency_bucket{module="cardinal",le="512"} 0 
+exec_latency_bucket{module="cardinal",le="1024"} 0 +exec_latency_bucket{module="cardinal",le="2048"} 0 +exec_latency_bucket{module="cardinal",le="4096"} 0 +exec_latency_bucket{module="cardinal",le="8192"} 0 +exec_latency_bucket{module="cardinal",le="16384"} 0 +exec_latency_bucket{module="cardinal",le="32768"} 0 +exec_latency_bucket{module="cardinal",le="65536"} 0 +exec_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +exec_latency_bucket{module="cardinal",le="+Inf"} 0 +exec_latency_sum{module="cardinal"} 0 +exec_latency_count{module="cardinal"} 0 +# HELP filter_connectivity_ratio avg connectivity ratio set under filtering per request +# TYPE filter_connectivity_ratio histogram +filter_connectivity_ratio_bucket{module="cardinal",le="1"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="2"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="4"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="8"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="16"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="32"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="64"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="128"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="256"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="512"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="1024"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="2048"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="4096"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="8192"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="16384"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="32768"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="65536"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="1.048576e+06"} 0 +filter_connectivity_ratio_bucket{module="cardinal",le="+Inf"} 0 +filter_connectivity_ratio_sum{module="cardinal"} 0 +filter_connectivity_ratio_count{module="cardinal"} 0 +# HELP filter_mv_activated_fields_cnt avg mv activated fields per request +# TYPE filter_mv_activated_fields_cnt histogram +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="1"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="2"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="4"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="8"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="16"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="32"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="64"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="128"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="256"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="512"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="1024"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="2048"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="4096"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="8192"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="16384"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="32768"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="65536"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +filter_mv_activated_fields_cnt_bucket{module="cardinal",le="+Inf"} 0 +filter_mv_activated_fields_cnt_sum{module="cardinal"} 0 
+filter_mv_activated_fields_cnt_count{module="cardinal"} 0 +# HELP filter_mv_change_base_cnt mv change base cnt per request +# TYPE filter_mv_change_base_cnt histogram +filter_mv_change_base_cnt_bucket{module="cardinal",le="1"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="2"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="4"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="8"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="16"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="32"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="64"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="128"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="256"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="512"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="1024"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="2048"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="4096"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="8192"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="16384"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="32768"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="65536"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +filter_mv_change_base_cnt_bucket{module="cardinal",le="+Inf"} 0 +filter_mv_change_base_cnt_sum{module="cardinal"} 0 +filter_mv_change_base_cnt_count{module="cardinal"} 0 +# HELP filter_mv_only_cnt mv only cnt per request +# TYPE filter_mv_only_cnt histogram +filter_mv_only_cnt_bucket{module="cardinal",le="1"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="2"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="4"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="8"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="16"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="32"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="64"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="128"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="256"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="512"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="1024"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="2048"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="4096"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="8192"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="16384"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="32768"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="65536"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +filter_mv_only_cnt_bucket{module="cardinal",le="+Inf"} 0 +filter_mv_only_cnt_sum{module="cardinal"} 0 +filter_mv_only_cnt_count{module="cardinal"} 0 +# HELP filter_mv_supplement_ep_bool_cnt mv supplement ep from bitset boolean cnt per request +# TYPE filter_mv_supplement_ep_bool_cnt histogram +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="1"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="2"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="4"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="8"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="16"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="32"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="64"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="128"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="256"} 0 
+filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="512"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="1024"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="2048"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="4096"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="8192"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="16384"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="32768"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="65536"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +filter_mv_supplement_ep_bool_cnt_bucket{module="cardinal",le="+Inf"} 0 +filter_mv_supplement_ep_bool_cnt_sum{module="cardinal"} 0 +filter_mv_supplement_ep_bool_cnt_count{module="cardinal"} 0 +# HELP graph_search_cnt number of graph search per request +# TYPE graph_search_cnt histogram +graph_search_cnt_bucket{module="cardinal",le="1"} 0 +graph_search_cnt_bucket{module="cardinal",le="2"} 0 +graph_search_cnt_bucket{module="cardinal",le="4"} 0 +graph_search_cnt_bucket{module="cardinal",le="8"} 0 +graph_search_cnt_bucket{module="cardinal",le="16"} 0 +graph_search_cnt_bucket{module="cardinal",le="32"} 0 +graph_search_cnt_bucket{module="cardinal",le="64"} 0 +graph_search_cnt_bucket{module="cardinal",le="128"} 0 +graph_search_cnt_bucket{module="cardinal",le="256"} 0 +graph_search_cnt_bucket{module="cardinal",le="512"} 0 +graph_search_cnt_bucket{module="cardinal",le="1024"} 0 +graph_search_cnt_bucket{module="cardinal",le="2048"} 0 +graph_search_cnt_bucket{module="cardinal",le="4096"} 0 +graph_search_cnt_bucket{module="cardinal",le="8192"} 0 +graph_search_cnt_bucket{module="cardinal",le="16384"} 0 +graph_search_cnt_bucket{module="cardinal",le="32768"} 0 +graph_search_cnt_bucket{module="cardinal",le="65536"} 0 +graph_search_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +graph_search_cnt_bucket{module="cardinal",le="+Inf"} 0 +graph_search_cnt_sum{module="cardinal"} 0 +graph_search_cnt_count{module="cardinal"} 0 +# HELP hnsw_bitset_ratio HNSW bitset ratio for search and range search +# TYPE hnsw_bitset_ratio histogram +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.05"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.1"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.15"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.2"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.25"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.3"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.35"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.4"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.45"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.5"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.55"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.6"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.65"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.7"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.75"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.8"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.85"} 0 
+hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.9"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="0.95"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="1"} 0 +hnsw_bitset_ratio_bucket{module="PROMETHEUS_LABEL_KNOWHERE",le="+Inf"} 0 +hnsw_bitset_ratio_sum{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +hnsw_bitset_ratio_count{module="PROMETHEUS_LABEL_KNOWHERE"} 0 +# HELP hnsw_search_hops HNSW search hops in layer 0 +# TYPE hnsw_search_hops histogram +hnsw_search_hops_bucket{module="knowhere",le="1"} 0 +hnsw_search_hops_bucket{module="knowhere",le="2"} 0 +hnsw_search_hops_bucket{module="knowhere",le="4"} 0 +hnsw_search_hops_bucket{module="knowhere",le="8"} 0 +hnsw_search_hops_bucket{module="knowhere",le="16"} 0 +hnsw_search_hops_bucket{module="knowhere",le="32"} 0 +hnsw_search_hops_bucket{module="knowhere",le="64"} 0 +hnsw_search_hops_bucket{module="knowhere",le="128"} 0 +hnsw_search_hops_bucket{module="knowhere",le="256"} 0 +hnsw_search_hops_bucket{module="knowhere",le="512"} 0 +hnsw_search_hops_bucket{module="knowhere",le="1024"} 0 +hnsw_search_hops_bucket{module="knowhere",le="2048"} 0 +hnsw_search_hops_bucket{module="knowhere",le="4096"} 0 +hnsw_search_hops_bucket{module="knowhere",le="8192"} 0 +hnsw_search_hops_bucket{module="knowhere",le="16384"} 0 +hnsw_search_hops_bucket{module="knowhere",le="32768"} 0 +hnsw_search_hops_bucket{module="knowhere",le="65536"} 0 +hnsw_search_hops_bucket{module="knowhere",le="1.048576e+06"} 0 +hnsw_search_hops_bucket{module="knowhere",le="+Inf"} 0 +hnsw_search_hops_sum{module="knowhere"} 0 +hnsw_search_hops_count{module="knowhere"} 0 +# HELP internal_core_search_latency [cpp]latency(us) of search on segment +# TYPE internal_core_search_latency histogram +internal_core_search_latency_bucket{type="groupby_latency",le="1"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="2"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="4"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="8"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="16"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="32"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="64"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="128"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="256"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="512"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="1024"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="2048"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="4096"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="8192"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="16384"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="32768"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="65536"} 0 +internal_core_search_latency_bucket{type="groupby_latency",le="+Inf"} 0 +internal_core_search_latency_sum{type="groupby_latency"} 0 +internal_core_search_latency_count{type="groupby_latency"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="1"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="2"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="4"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="8"} 0 
+internal_core_search_latency_bucket{type="iterative_filter_latency",le="16"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="32"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="64"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="128"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="256"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="512"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="1024"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="2048"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="4096"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="8192"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="16384"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="32768"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="65536"} 0 +internal_core_search_latency_bucket{type="iterative_filter_latency",le="+Inf"} 0 +internal_core_search_latency_sum{type="iterative_filter_latency"} 0 +internal_core_search_latency_count{type="iterative_filter_latency"} 0 +internal_core_search_latency_bucket{type="scalar_latency",le="1"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="2"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="4"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="8"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="16"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="32"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="64"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="128"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="256"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="512"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="1024"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="2048"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="4096"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="8192"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="16384"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="32768"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="65536"} 6 +internal_core_search_latency_bucket{type="scalar_latency",le="+Inf"} 6 +internal_core_search_latency_sum{type="scalar_latency"} 0.761541 +internal_core_search_latency_count{type="scalar_latency"} 6 +internal_core_search_latency_bucket{type="scalar_proportion",le="0"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.05"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.1"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.15"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.2"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.25"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.3"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.35"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.4"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.45"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.5"} 0 
+internal_core_search_latency_bucket{type="scalar_proportion",le="0.55"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.6"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.65"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.7"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.75"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.8"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.85"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.9"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="0.95"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="1"} 0 +internal_core_search_latency_bucket{type="scalar_proportion",le="+Inf"} 0 +internal_core_search_latency_sum{type="scalar_proportion"} 0 +internal_core_search_latency_count{type="scalar_proportion"} 0 +internal_core_search_latency_bucket{type="vector_latency",le="1"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="2"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="4"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="8"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="16"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="32"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="64"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="128"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="256"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="512"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="1024"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="2048"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="4096"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="8192"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="16384"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="32768"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="65536"} 2 +internal_core_search_latency_bucket{type="vector_latency",le="+Inf"} 2 +internal_core_search_latency_sum{type="vector_latency"} 1.549042 +internal_core_search_latency_count{type="vector_latency"} 2 +# HELP internal_mmap_allocated_space_bytes [cpp]mmap allocated space stats +# TYPE internal_mmap_allocated_space_bytes histogram +internal_mmap_allocated_space_bytes_bucket{type="anon",le="1024"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="8192"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="65536"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="262144"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="524288"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="1.048576e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="4.194304e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="8.388608e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="1.6777216e+07"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="6.7108864e+07"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="1.34217728e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="2.68435456e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="5.36870912e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="anon",le="1.073741824e+09"} 0 
+internal_mmap_allocated_space_bytes_bucket{type="anon",le="+Inf"} 0 +internal_mmap_allocated_space_bytes_sum{type="anon"} 0 +internal_mmap_allocated_space_bytes_count{type="anon"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="1024"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="8192"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="65536"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="262144"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="524288"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="1.048576e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="4.194304e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="8.388608e+06"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="1.6777216e+07"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="6.7108864e+07"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="1.34217728e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="2.68435456e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="5.36870912e+08"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="1.073741824e+09"} 0 +internal_mmap_allocated_space_bytes_bucket{type="file",le="+Inf"} 0 +internal_mmap_allocated_space_bytes_sum{type="file"} 0 +internal_mmap_allocated_space_bytes_count{type="file"} 0 +# HELP internal_mmap_in_used_count [cpp]mmap in used count stats +# TYPE internal_mmap_in_used_count gauge +internal_mmap_in_used_count{type="anon"} 0 +internal_mmap_in_used_count{type="file"} 0 +# HELP internal_mmap_in_used_space_bytes [cpp]mmap in used space stats +# TYPE internal_mmap_in_used_space_bytes gauge +internal_mmap_in_used_space_bytes{type="anon"} 0 +internal_mmap_in_used_space_bytes{type="file"} 0 +# HELP internal_storage_kv_size [cpp]kv size stats +# TYPE internal_storage_kv_size histogram +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="1"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="2"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="4"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="8"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="16"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="32"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="64"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="128"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="256"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="512"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="1024"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="2048"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="4096"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="8192"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="16384"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="32768"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="65536"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="get",le="+Inf"} 0 +internal_storage_kv_size_sum{persistent_data_op_type="get"} 0 +internal_storage_kv_size_count{persistent_data_op_type="get"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="1"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="2"} 0 
+internal_storage_kv_size_bucket{persistent_data_op_type="put",le="4"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="8"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="16"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="32"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="64"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="128"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="256"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="512"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="1024"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="2048"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="4096"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="8192"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="16384"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="32768"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="65536"} 0 +internal_storage_kv_size_bucket{persistent_data_op_type="put",le="+Inf"} 0 +internal_storage_kv_size_sum{persistent_data_op_type="put"} 0 +internal_storage_kv_size_count{persistent_data_op_type="put"} 0 +# HELP internal_storage_load_duration [cpp]durations of load segment +# TYPE internal_storage_load_duration histogram +internal_storage_load_duration_bucket{type="deserialize",le="1"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="2"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="4"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="8"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="16"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="32"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="64"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="128"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="256"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="512"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="1024"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="2048"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="4096"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="8192"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="16384"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="32768"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="65536"} 0 +internal_storage_load_duration_bucket{type="deserialize",le="+Inf"} 0 +internal_storage_load_duration_sum{type="deserialize"} 0 +internal_storage_load_duration_count{type="deserialize"} 0 +internal_storage_load_duration_bucket{type="download",le="1"} 0 +internal_storage_load_duration_bucket{type="download",le="2"} 0 +internal_storage_load_duration_bucket{type="download",le="4"} 0 +internal_storage_load_duration_bucket{type="download",le="8"} 0 +internal_storage_load_duration_bucket{type="download",le="16"} 0 +internal_storage_load_duration_bucket{type="download",le="32"} 0 +internal_storage_load_duration_bucket{type="download",le="64"} 0 +internal_storage_load_duration_bucket{type="download",le="128"} 0 +internal_storage_load_duration_bucket{type="download",le="256"} 0 +internal_storage_load_duration_bucket{type="download",le="512"} 0 +internal_storage_load_duration_bucket{type="download",le="1024"} 
0 +internal_storage_load_duration_bucket{type="download",le="2048"} 0 +internal_storage_load_duration_bucket{type="download",le="4096"} 0 +internal_storage_load_duration_bucket{type="download",le="8192"} 0 +internal_storage_load_duration_bucket{type="download",le="16384"} 0 +internal_storage_load_duration_bucket{type="download",le="32768"} 0 +internal_storage_load_duration_bucket{type="download",le="65536"} 0 +internal_storage_load_duration_bucket{type="download",le="+Inf"} 0 +internal_storage_load_duration_sum{type="download"} 0 +internal_storage_load_duration_count{type="download"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="1"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="2"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="4"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="8"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="16"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="32"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="64"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="128"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="256"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="512"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="1024"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="2048"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="4096"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="8192"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="16384"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="32768"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="65536"} 0 +internal_storage_load_duration_bucket{type="write_disk",le="+Inf"} 0 +internal_storage_load_duration_sum{type="write_disk"} 0 +internal_storage_load_duration_count{type="write_disk"} 0 +# HELP internal_storage_op_count [cpp]count of persistent data operation +# TYPE internal_storage_op_count counter +internal_storage_op_count{persistent_data_op_type="get"} 0 +internal_storage_op_count{persistent_data_op_type="get",status="success"} 0 +internal_storage_op_count{persistent_data_op_type="list",status="fail"} 0 +internal_storage_op_count{persistent_data_op_type="list",status="success"} 0 +internal_storage_op_count{persistent_data_op_type="put",status="fail"} 0 +internal_storage_op_count{persistent_data_op_type="put",status="success"} 0 +internal_storage_op_count{persistent_data_op_type="remove",status="fail"} 0 +internal_storage_op_count{persistent_data_op_type="remove",status="success"} 0 +internal_storage_op_count{persistent_data_op_type="stat",status="fail"} 0 +internal_storage_op_count{persistent_data_op_type="stat",status="success"} 0 +# HELP internal_storage_request_latency [cpp]request latency(ms) on the client side +# TYPE internal_storage_request_latency histogram +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="1"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="2"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="4"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="8"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="16"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="32"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="64"} 0 
+internal_storage_request_latency_bucket{persistent_data_op_type="get",le="128"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="256"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="512"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="1024"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="2048"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="4096"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="8192"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="16384"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="32768"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="65536"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="get",le="+Inf"} 0 +internal_storage_request_latency_sum{persistent_data_op_type="get"} 0 +internal_storage_request_latency_count{persistent_data_op_type="get"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="1"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="2"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="4"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="8"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="16"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="32"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="64"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="128"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="256"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="512"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="1024"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="2048"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="4096"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="8192"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="16384"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="32768"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="65536"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="list",le="+Inf"} 0 +internal_storage_request_latency_sum{persistent_data_op_type="list"} 0 +internal_storage_request_latency_count{persistent_data_op_type="list"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="1"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="2"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="4"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="8"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="16"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="32"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="64"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="128"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="256"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="512"} 0 
+internal_storage_request_latency_bucket{persistent_data_op_type="put",le="1024"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="2048"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="4096"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="8192"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="16384"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="32768"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="65536"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="put",le="+Inf"} 0 +internal_storage_request_latency_sum{persistent_data_op_type="put"} 0 +internal_storage_request_latency_count{persistent_data_op_type="put"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="1"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="2"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="4"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="8"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="16"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="32"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="64"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="128"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="256"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="512"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="1024"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="2048"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="4096"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="8192"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="16384"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="32768"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="65536"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="remove",le="+Inf"} 0 +internal_storage_request_latency_sum{persistent_data_op_type="remove"} 0 +internal_storage_request_latency_count{persistent_data_op_type="remove"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="1"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="2"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="4"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="8"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="16"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="32"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="64"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="128"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="256"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="512"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="1024"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="2048"} 0 
+internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="4096"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="8192"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="16384"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="32768"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="65536"} 0 +internal_storage_request_latency_bucket{persistent_data_op_type="stat",le="+Inf"} 0 +internal_storage_request_latency_sum{persistent_data_op_type="stat"} 0 +internal_storage_request_latency_count{persistent_data_op_type="stat"} 0 +# HELP io_cnt io cnt per request +# TYPE io_cnt histogram +io_cnt_bucket{module="cardinal",le="1"} 0 +io_cnt_bucket{module="cardinal",le="2"} 0 +io_cnt_bucket{module="cardinal",le="4"} 0 +io_cnt_bucket{module="cardinal",le="8"} 0 +io_cnt_bucket{module="cardinal",le="16"} 0 +io_cnt_bucket{module="cardinal",le="32"} 0 +io_cnt_bucket{module="cardinal",le="64"} 0 +io_cnt_bucket{module="cardinal",le="128"} 0 +io_cnt_bucket{module="cardinal",le="256"} 0 +io_cnt_bucket{module="cardinal",le="512"} 0 +io_cnt_bucket{module="cardinal",le="1024"} 0 +io_cnt_bucket{module="cardinal",le="2048"} 0 +io_cnt_bucket{module="cardinal",le="4096"} 0 +io_cnt_bucket{module="cardinal",le="8192"} 0 +io_cnt_bucket{module="cardinal",le="16384"} 0 +io_cnt_bucket{module="cardinal",le="32768"} 0 +io_cnt_bucket{module="cardinal",le="65536"} 0 +io_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +io_cnt_bucket{module="cardinal",le="+Inf"} 0 +io_cnt_sum{module="cardinal"} 0 +io_cnt_count{module="cardinal"} 0 +# HELP ivf_search_cnt number of ivf search per request +# TYPE ivf_search_cnt histogram +ivf_search_cnt_bucket{module="cardinal",le="1"} 0 +ivf_search_cnt_bucket{module="cardinal",le="2"} 0 +ivf_search_cnt_bucket{module="cardinal",le="4"} 0 +ivf_search_cnt_bucket{module="cardinal",le="8"} 0 +ivf_search_cnt_bucket{module="cardinal",le="16"} 0 +ivf_search_cnt_bucket{module="cardinal",le="32"} 0 +ivf_search_cnt_bucket{module="cardinal",le="64"} 0 +ivf_search_cnt_bucket{module="cardinal",le="128"} 0 +ivf_search_cnt_bucket{module="cardinal",le="256"} 0 +ivf_search_cnt_bucket{module="cardinal",le="512"} 0 +ivf_search_cnt_bucket{module="cardinal",le="1024"} 0 +ivf_search_cnt_bucket{module="cardinal",le="2048"} 0 +ivf_search_cnt_bucket{module="cardinal",le="4096"} 0 +ivf_search_cnt_bucket{module="cardinal",le="8192"} 0 +ivf_search_cnt_bucket{module="cardinal",le="16384"} 0 +ivf_search_cnt_bucket{module="cardinal",le="32768"} 0 +ivf_search_cnt_bucket{module="cardinal",le="65536"} 0 +ivf_search_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +ivf_search_cnt_bucket{module="cardinal",le="+Inf"} 0 +ivf_search_cnt_sum{module="cardinal"} 0 +ivf_search_cnt_count{module="cardinal"} 0 +# HELP load_latency index load latency (ms) +# TYPE load_latency histogram +load_latency_bucket{module="cardinal",le="1"} 0 +load_latency_bucket{module="cardinal",le="2"} 0 +load_latency_bucket{module="cardinal",le="4"} 0 +load_latency_bucket{module="cardinal",le="8"} 0 +load_latency_bucket{module="cardinal",le="16"} 0 +load_latency_bucket{module="cardinal",le="32"} 0 +load_latency_bucket{module="cardinal",le="64"} 0 +load_latency_bucket{module="cardinal",le="128"} 0 +load_latency_bucket{module="cardinal",le="256"} 0 +load_latency_bucket{module="cardinal",le="512"} 0 +load_latency_bucket{module="cardinal",le="1024"} 0 +load_latency_bucket{module="cardinal",le="2048"} 0 
+load_latency_bucket{module="cardinal",le="4096"} 0 +load_latency_bucket{module="cardinal",le="8192"} 0 +load_latency_bucket{module="cardinal",le="16384"} 0 +load_latency_bucket{module="cardinal",le="32768"} 0 +load_latency_bucket{module="cardinal",le="65536"} 0 +load_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +load_latency_bucket{module="cardinal",le="+Inf"} 0 +load_latency_sum{module="cardinal"} 0 +load_latency_count{module="cardinal"} 0 +load_latency_bucket{module="knowhere",le="1"} 1 +load_latency_bucket{module="knowhere",le="2"} 1 +load_latency_bucket{module="knowhere",le="4"} 1 +load_latency_bucket{module="knowhere",le="8"} 1 +load_latency_bucket{module="knowhere",le="16"} 1 +load_latency_bucket{module="knowhere",le="32"} 1 +load_latency_bucket{module="knowhere",le="64"} 1 +load_latency_bucket{module="knowhere",le="128"} 1 +load_latency_bucket{module="knowhere",le="256"} 1 +load_latency_bucket{module="knowhere",le="512"} 1 +load_latency_bucket{module="knowhere",le="1024"} 1 +load_latency_bucket{module="knowhere",le="2048"} 1 +load_latency_bucket{module="knowhere",le="4096"} 1 +load_latency_bucket{module="knowhere",le="8192"} 1 +load_latency_bucket{module="knowhere",le="16384"} 1 +load_latency_bucket{module="knowhere",le="32768"} 1 +load_latency_bucket{module="knowhere",le="65536"} 1 +load_latency_bucket{module="knowhere",le="1.048576e+06"} 1 +load_latency_bucket{module="knowhere",le="+Inf"} 1 +load_latency_sum{module="knowhere"} 0.1285 +load_latency_count{module="knowhere"} 1 +# HELP quant_compute_cnt quant compute cnt per request +# TYPE quant_compute_cnt histogram +quant_compute_cnt_bucket{module="cardinal",le="1"} 0 +quant_compute_cnt_bucket{module="cardinal",le="2"} 0 +quant_compute_cnt_bucket{module="cardinal",le="4"} 0 +quant_compute_cnt_bucket{module="cardinal",le="8"} 0 +quant_compute_cnt_bucket{module="cardinal",le="16"} 0 +quant_compute_cnt_bucket{module="cardinal",le="32"} 0 +quant_compute_cnt_bucket{module="cardinal",le="64"} 0 +quant_compute_cnt_bucket{module="cardinal",le="128"} 0 +quant_compute_cnt_bucket{module="cardinal",le="256"} 0 +quant_compute_cnt_bucket{module="cardinal",le="512"} 0 +quant_compute_cnt_bucket{module="cardinal",le="1024"} 0 +quant_compute_cnt_bucket{module="cardinal",le="2048"} 0 +quant_compute_cnt_bucket{module="cardinal",le="4096"} 0 +quant_compute_cnt_bucket{module="cardinal",le="8192"} 0 +quant_compute_cnt_bucket{module="cardinal",le="16384"} 0 +quant_compute_cnt_bucket{module="cardinal",le="32768"} 0 +quant_compute_cnt_bucket{module="cardinal",le="65536"} 0 +quant_compute_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +quant_compute_cnt_bucket{module="cardinal",le="+Inf"} 0 +quant_compute_cnt_sum{module="cardinal"} 0 +quant_compute_cnt_count{module="cardinal"} 0 +# HELP queue_latency queue latency per request +# TYPE queue_latency histogram +queue_latency_bucket{module="cardinal",le="1"} 0 +queue_latency_bucket{module="cardinal",le="2"} 0 +queue_latency_bucket{module="cardinal",le="4"} 0 +queue_latency_bucket{module="cardinal",le="8"} 0 +queue_latency_bucket{module="cardinal",le="16"} 0 +queue_latency_bucket{module="cardinal",le="32"} 0 +queue_latency_bucket{module="cardinal",le="64"} 0 +queue_latency_bucket{module="cardinal",le="128"} 0 +queue_latency_bucket{module="cardinal",le="256"} 0 +queue_latency_bucket{module="cardinal",le="512"} 0 +queue_latency_bucket{module="cardinal",le="1024"} 0 +queue_latency_bucket{module="cardinal",le="2048"} 0 +queue_latency_bucket{module="cardinal",le="4096"} 0 
+queue_latency_bucket{module="cardinal",le="8192"} 0 +queue_latency_bucket{module="cardinal",le="16384"} 0 +queue_latency_bucket{module="cardinal",le="32768"} 0 +queue_latency_bucket{module="cardinal",le="65536"} 0 +queue_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +queue_latency_bucket{module="cardinal",le="+Inf"} 0 +queue_latency_sum{module="cardinal"} 0 +queue_latency_count{module="cardinal"} 0 +# HELP range_search_latency range search latency (ms) +# TYPE range_search_latency histogram +range_search_latency_bucket{module="cardinal",le="1"} 0 +range_search_latency_bucket{module="cardinal",le="2"} 0 +range_search_latency_bucket{module="cardinal",le="4"} 0 +range_search_latency_bucket{module="cardinal",le="8"} 0 +range_search_latency_bucket{module="cardinal",le="16"} 0 +range_search_latency_bucket{module="cardinal",le="32"} 0 +range_search_latency_bucket{module="cardinal",le="64"} 0 +range_search_latency_bucket{module="cardinal",le="128"} 0 +range_search_latency_bucket{module="cardinal",le="256"} 0 +range_search_latency_bucket{module="cardinal",le="512"} 0 +range_search_latency_bucket{module="cardinal",le="1024"} 0 +range_search_latency_bucket{module="cardinal",le="2048"} 0 +range_search_latency_bucket{module="cardinal",le="4096"} 0 +range_search_latency_bucket{module="cardinal",le="8192"} 0 +range_search_latency_bucket{module="cardinal",le="16384"} 0 +range_search_latency_bucket{module="cardinal",le="32768"} 0 +range_search_latency_bucket{module="cardinal",le="65536"} 0 +range_search_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +range_search_latency_bucket{module="cardinal",le="+Inf"} 0 +range_search_latency_sum{module="cardinal"} 0 +range_search_latency_count{module="cardinal"} 0 +range_search_latency_bucket{module="knowhere",le="1"} 0 +range_search_latency_bucket{module="knowhere",le="2"} 0 +range_search_latency_bucket{module="knowhere",le="4"} 0 +range_search_latency_bucket{module="knowhere",le="8"} 0 +range_search_latency_bucket{module="knowhere",le="16"} 0 +range_search_latency_bucket{module="knowhere",le="32"} 0 +range_search_latency_bucket{module="knowhere",le="64"} 0 +range_search_latency_bucket{module="knowhere",le="128"} 0 +range_search_latency_bucket{module="knowhere",le="256"} 0 +range_search_latency_bucket{module="knowhere",le="512"} 0 +range_search_latency_bucket{module="knowhere",le="1024"} 0 +range_search_latency_bucket{module="knowhere",le="2048"} 0 +range_search_latency_bucket{module="knowhere",le="4096"} 0 +range_search_latency_bucket{module="knowhere",le="8192"} 0 +range_search_latency_bucket{module="knowhere",le="16384"} 0 +range_search_latency_bucket{module="knowhere",le="32768"} 0 +range_search_latency_bucket{module="knowhere",le="65536"} 0 +range_search_latency_bucket{module="knowhere",le="1.048576e+06"} 0 +range_search_latency_bucket{module="knowhere",le="+Inf"} 0 +range_search_latency_sum{module="knowhere"} 0 +range_search_latency_count{module="knowhere"} 0 +# HELP raw_compute_cnt raw compute cnt per request +# TYPE raw_compute_cnt histogram +raw_compute_cnt_bucket{module="cardinal",le="1"} 0 +raw_compute_cnt_bucket{module="cardinal",le="2"} 0 +raw_compute_cnt_bucket{module="cardinal",le="4"} 0 +raw_compute_cnt_bucket{module="cardinal",le="8"} 0 +raw_compute_cnt_bucket{module="cardinal",le="16"} 0 +raw_compute_cnt_bucket{module="cardinal",le="32"} 0 +raw_compute_cnt_bucket{module="cardinal",le="64"} 0 +raw_compute_cnt_bucket{module="cardinal",le="128"} 0 +raw_compute_cnt_bucket{module="cardinal",le="256"} 0 
+raw_compute_cnt_bucket{module="cardinal",le="512"} 0 +raw_compute_cnt_bucket{module="cardinal",le="1024"} 0 +raw_compute_cnt_bucket{module="cardinal",le="2048"} 0 +raw_compute_cnt_bucket{module="cardinal",le="4096"} 0 +raw_compute_cnt_bucket{module="cardinal",le="8192"} 0 +raw_compute_cnt_bucket{module="cardinal",le="16384"} 0 +raw_compute_cnt_bucket{module="cardinal",le="32768"} 0 +raw_compute_cnt_bucket{module="cardinal",le="65536"} 0 +raw_compute_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +raw_compute_cnt_bucket{module="cardinal",le="+Inf"} 0 +raw_compute_cnt_sum{module="cardinal"} 0 +raw_compute_cnt_count{module="cardinal"} 0 +# HELP re_search_cnt number of fallback search per request +# TYPE re_search_cnt histogram +re_search_cnt_bucket{module="cardinal",le="1"} 0 +re_search_cnt_bucket{module="cardinal",le="2"} 0 +re_search_cnt_bucket{module="cardinal",le="4"} 0 +re_search_cnt_bucket{module="cardinal",le="8"} 0 +re_search_cnt_bucket{module="cardinal",le="16"} 0 +re_search_cnt_bucket{module="cardinal",le="32"} 0 +re_search_cnt_bucket{module="cardinal",le="64"} 0 +re_search_cnt_bucket{module="cardinal",le="128"} 0 +re_search_cnt_bucket{module="cardinal",le="256"} 0 +re_search_cnt_bucket{module="cardinal",le="512"} 0 +re_search_cnt_bucket{module="cardinal",le="1024"} 0 +re_search_cnt_bucket{module="cardinal",le="2048"} 0 +re_search_cnt_bucket{module="cardinal",le="4096"} 0 +re_search_cnt_bucket{module="cardinal",le="8192"} 0 +re_search_cnt_bucket{module="cardinal",le="16384"} 0 +re_search_cnt_bucket{module="cardinal",le="32768"} 0 +re_search_cnt_bucket{module="cardinal",le="65536"} 0 +re_search_cnt_bucket{module="cardinal",le="1.048576e+06"} 0 +re_search_cnt_bucket{module="cardinal",le="+Inf"} 0 +re_search_cnt_sum{module="cardinal"} 0 +re_search_cnt_count{module="cardinal"} 0 +# HELP search_latency search latency (ms) +# TYPE search_latency histogram +search_latency_bucket{module="cardinal",le="1"} 0 +search_latency_bucket{module="cardinal",le="2"} 0 +search_latency_bucket{module="cardinal",le="4"} 0 +search_latency_bucket{module="cardinal",le="8"} 0 +search_latency_bucket{module="cardinal",le="16"} 0 +search_latency_bucket{module="cardinal",le="32"} 0 +search_latency_bucket{module="cardinal",le="64"} 0 +search_latency_bucket{module="cardinal",le="128"} 0 +search_latency_bucket{module="cardinal",le="256"} 0 +search_latency_bucket{module="cardinal",le="512"} 0 +search_latency_bucket{module="cardinal",le="1024"} 0 +search_latency_bucket{module="cardinal",le="2048"} 0 +search_latency_bucket{module="cardinal",le="4096"} 0 +search_latency_bucket{module="cardinal",le="8192"} 0 +search_latency_bucket{module="cardinal",le="16384"} 0 +search_latency_bucket{module="cardinal",le="32768"} 0 +search_latency_bucket{module="cardinal",le="65536"} 0 +search_latency_bucket{module="cardinal",le="1.048576e+06"} 0 +search_latency_bucket{module="cardinal",le="+Inf"} 0 +search_latency_sum{module="cardinal"} 0 +search_latency_count{module="cardinal"} 0 +search_latency_bucket{module="knowhere",le="1"} 2 +search_latency_bucket{module="knowhere",le="2"} 2 +search_latency_bucket{module="knowhere",le="4"} 2 +search_latency_bucket{module="knowhere",le="8"} 2 +search_latency_bucket{module="knowhere",le="16"} 2 +search_latency_bucket{module="knowhere",le="32"} 2 +search_latency_bucket{module="knowhere",le="64"} 2 +search_latency_bucket{module="knowhere",le="128"} 2 +search_latency_bucket{module="knowhere",le="256"} 2 +search_latency_bucket{module="knowhere",le="512"} 2 
+search_latency_bucket{module="knowhere",le="1024"} 2 +search_latency_bucket{module="knowhere",le="2048"} 2 +search_latency_bucket{module="knowhere",le="4096"} 2 +search_latency_bucket{module="knowhere",le="8192"} 2 +search_latency_bucket{module="knowhere",le="16384"} 2 +search_latency_bucket{module="knowhere",le="32768"} 2 +search_latency_bucket{module="knowhere",le="65536"} 2 +search_latency_bucket{module="knowhere",le="1.048576e+06"} 2 +search_latency_bucket{module="knowhere",le="+Inf"} 2 +search_latency_sum{module="knowhere"} 0.8826250000000001 +search_latency_count{module="knowhere"} 2 +# HELP search_topk search topk +# TYPE search_topk histogram +search_topk_bucket{module="cardinal",le="1"} 0 +search_topk_bucket{module="cardinal",le="2"} 0 +search_topk_bucket{module="cardinal",le="4"} 0 +search_topk_bucket{module="cardinal",le="8"} 0 +search_topk_bucket{module="cardinal",le="16"} 0 +search_topk_bucket{module="cardinal",le="32"} 0 +search_topk_bucket{module="cardinal",le="64"} 0 +search_topk_bucket{module="cardinal",le="128"} 0 +search_topk_bucket{module="cardinal",le="256"} 0 +search_topk_bucket{module="cardinal",le="512"} 0 +search_topk_bucket{module="cardinal",le="1024"} 0 +search_topk_bucket{module="cardinal",le="2048"} 0 +search_topk_bucket{module="cardinal",le="4096"} 0 +search_topk_bucket{module="cardinal",le="8192"} 0 +search_topk_bucket{module="cardinal",le="16384"} 0 +search_topk_bucket{module="cardinal",le="32768"} 0 +search_topk_bucket{module="cardinal",le="65536"} 0 +search_topk_bucket{module="cardinal",le="1.048576e+06"} 0 +search_topk_bucket{module="cardinal",le="+Inf"} 0 +search_topk_sum{module="cardinal"} 0 +search_topk_count{module="cardinal"} 0 +search_topk_bucket{module="knowhere",le="1"} 0 +search_topk_bucket{module="knowhere",le="2"} 0 +search_topk_bucket{module="knowhere",le="4"} 2 +search_topk_bucket{module="knowhere",le="8"} 2 +search_topk_bucket{module="knowhere",le="16"} 2 +search_topk_bucket{module="knowhere",le="32"} 2 +search_topk_bucket{module="knowhere",le="64"} 2 +search_topk_bucket{module="knowhere",le="128"} 2 +search_topk_bucket{module="knowhere",le="256"} 2 +search_topk_bucket{module="knowhere",le="512"} 2 +search_topk_bucket{module="knowhere",le="1024"} 2 +search_topk_bucket{module="knowhere",le="2048"} 2 +search_topk_bucket{module="knowhere",le="4096"} 2 +search_topk_bucket{module="knowhere",le="8192"} 2 +search_topk_bucket{module="knowhere",le="16384"} 2 +search_topk_bucket{module="knowhere",le="32768"} 2 +search_topk_bucket{module="knowhere",le="65536"} 2 +search_topk_bucket{module="knowhere",le="1.048576e+06"} 2 +search_topk_bucket{module="knowhere",le="+Inf"} 2 +search_topk_sum{module="knowhere"} 6 +search_topk_count{module="knowhere"} 2 diff --git a/milvus/tests/test_e2e.py b/milvus/tests/test_e2e.py new file mode 100644 index 0000000000000..058f4713b4cdf --- /dev/null +++ b/milvus/tests/test_e2e.py @@ -0,0 +1,23 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from datadog_checks.base.constants import ServiceCheck + +from . 
import common + + +@pytest.mark.e2e +def test_check_milvus_e2e(dd_agent_check, instance): + aggregator = dd_agent_check(instance, rate=True) + + for metric, _ in common.STANDALONE_TEST_METRICS.items(): + if metric in [ + 'milvus.datacoord.import_tasks', + 'milvus.datacoord.index.task', + ]: # these metrics need a more complex setup to appear + continue + aggregator.assert_metric(name=metric) + + aggregator.assert_service_check('milvus.openmetrics.health', ServiceCheck.OK) diff --git a/milvus/tests/test_unit.py b/milvus/tests/test_unit.py new file mode 100644 index 0000000000000..cbf2e608c731a --- /dev/null +++ b/milvus/tests/test_unit.py @@ -0,0 +1,43 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import pytest + +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.milvus import MilvusCheck + +from .common import STANDALONE_TEST_METRICS, get_fixture_path + + +def test_check(dd_run_check, aggregator, instance, mock_http_response): + mock_http_response(file_path=get_fixture_path('milvus_payload.txt')) + + check = MilvusCheck('milvus', {}, [instance]) + dd_run_check(check) + + for metric, metric_type in STANDALONE_TEST_METRICS.items(): + aggregator.assert_metric(metric, metric_type=aggregator.METRIC_ENUM_MAP[metric_type]) + aggregator.assert_metric_has_tag(metric, 'test:tag') + + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + + +def test_empty_instance(dd_run_check): + with pytest.raises( + Exception, + match='InstanceConfig`:\nopenmetrics_endpoint\n Field required', + ): + check = MilvusCheck('milvus', {}, [{}]) + dd_run_check(check) + + +def test_custom_validation(dd_run_check): + endpoint = 'milvus:2112/metrics' + with pytest.raises( + Exception, + match='openmetrics_endpoint: {} is incorrectly configured'.format(endpoint), + ): + check = MilvusCheck('milvus', {}, [{'openmetrics_endpoint': endpoint}]) + dd_run_check(check) diff --git a/mongo/CHANGELOG.md b/mongo/CHANGELOG.md index 682f4352023f3..87f6d903c2462 100644 --- a/mongo/CHANGELOG.md +++ b/mongo/CHANGELOG.md @@ -2,6 +2,12 @@ +## 8.3.1 / 2024-12-26 + +***Fixed***: + +* Skip unauthorized `local` database collections `system.replset`, `replset.election`, and `replset.minvalid` in collection and index stats gathering to avoid permission errors. ([#19244](https://github.com/DataDog/integrations-core/pull/19244)) + ## 8.3.0 / 2024-11-28 ***Added***: diff --git a/mongo/changelog.d/19244.fixed b/mongo/changelog.d/19244.fixed deleted file mode 100644 index 120bd5e2976f1..0000000000000 --- a/mongo/changelog.d/19244.fixed +++ /dev/null @@ -1 +0,0 @@ -Skip unauthorized `local` database collections `system.replset`, `replset.election`, and `replset.minvalid` in collection and index stats gathering to avoid permission errors. 
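The `test_custom_validation` case above rejects `milvus:2112/metrics`, which suggests the Milvus check's custom validation requires an explicit HTTP(S) scheme on `openmetrics_endpoint`. A minimal sketch of that rule, assuming a bare scheme check is all that is enforced; the helper below is illustrative only and is not part of the integration:

    def is_valid_openmetrics_endpoint(endpoint):
        # The unit test implies endpoints must carry an explicit http(s) scheme.
        return endpoint.startswith(('http://', 'https://'))

    assert not is_valid_openmetrics_endpoint('milvus:2112/metrics')  # rejected in test_custom_validation
    assert is_valid_openmetrics_endpoint('http://milvus:2112/metrics')  # a correctly configured endpoint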
diff --git a/mongo/datadog_checks/mongo/__about__.py b/mongo/datadog_checks/mongo/__about__.py index 88bbe435de59f..b401aede304fe 100644 --- a/mongo/datadog_checks/mongo/__about__.py +++ b/mongo/datadog_checks/mongo/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '8.3.0' +__version__ = '8.3.1' diff --git a/mysql/CHANGELOG.md b/mysql/CHANGELOG.md index 30d4995571bb1..b9be0956d0102 100644 --- a/mysql/CHANGELOG.md +++ b/mysql/CHANGELOG.md @@ -2,6 +2,12 @@ +## 14.4.0 / 2024-12-26 + +***Added***: + +* Add `mysql.performance.performance_schema_digest_lost`, the number of digest instances that could not be instrumented in the `events_statements_summary_by_digest` table. ([#19121](https://github.com/DataDog/integrations-core/pull/19121)) + ## 14.3.0 / 2024-11-28 ***Added***: diff --git a/mysql/changelog.d/19121.added b/mysql/changelog.d/19121.added deleted file mode 100644 index 97990bdf0ca9b..0000000000000 --- a/mysql/changelog.d/19121.added +++ /dev/null @@ -1 +0,0 @@ -Add `mysql.performance.performance_schema_digest_lost`, the number of digest instances that could not be instrumented in the `events_statements_summary_by_digest` table. diff --git a/mysql/datadog_checks/mysql/__about__.py b/mysql/datadog_checks/mysql/__about__.py index 58eaa19f57f56..e3db75fa3d1e7 100644 --- a/mysql/datadog_checks/mysql/__about__.py +++ b/mysql/datadog_checks/mysql/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "14.3.0" +__version__ = "14.4.0" diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 6e304cb914357..2d813a04e204c 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -2,6 +2,16 @@ +## 22.4.0 / 2024-12-26 + +***Added***: + +* Add postgresql.relation.xmin metric ([#19218](https://github.com/DataDog/integrations-core/pull/19218)) + +***Fixed***: + +* Fix check for Postgres collect query activity to avoid bugs with in-flight duration and missing blocking pids ([#19271](https://github.com/DataDog/integrations-core/pull/19271)) + ## 22.3.0 / 2024-11-28 ***Added***: diff --git a/postgres/changelog.d/19218.added b/postgres/changelog.d/19218.added deleted file mode 100644 index 7ba3bd2933029..0000000000000 --- a/postgres/changelog.d/19218.added +++ /dev/null @@ -1 +0,0 @@ -Add postgresql.relation.xmin metric diff --git a/postgres/changelog.d/19271.fixed b/postgres/changelog.d/19271.fixed deleted file mode 100644 index 9d981b57621e7..0000000000000 --- a/postgres/changelog.d/19271.fixed +++ /dev/null @@ -1 +0,0 @@ -Fix check for Postgres collect query activity to avoid bugs with in-flight duration and missing blocking pids diff --git a/postgres/datadog_checks/postgres/__about__.py b/postgres/datadog_checks/postgres/__about__.py index c510a97eec89f..cdff175ef224f 100644 --- a/postgres/datadog_checks/postgres/__about__.py +++ b/postgres/datadog_checks/postgres/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "22.3.0" +__version__ = "22.4.0" diff --git a/quarkus/CHANGELOG.md b/quarkus/CHANGELOG.md index d0112b5d3908f..aeacd4bcf761c 100644 --- a/quarkus/CHANGELOG.md +++ b/quarkus/CHANGELOG.md @@ -2,3 +2,8 @@ +## 1.0.0 / 2024-12-26 + +***Added***: + +* Initial Release ([#19196](https://github.com/DataDog/integrations-core/pull/19196)) diff --git a/quarkus/datadog_checks/quarkus/__about__.py b/quarkus/datadog_checks/quarkus/__about__.py index 
e9541ce83e9e5..acbfd1c866b84 100644 --- a/quarkus/datadog_checks/quarkus/__about__.py +++ b/quarkus/datadog_checks/quarkus/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2024-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '0.0.1' +__version__ = '1.0.0' diff --git a/requirements-agent-release.txt b/requirements-agent-release.txt index 1982fa9e99de6..603be6bf45000 100644 --- a/requirements-agent-release.txt +++ b/requirements-agent-release.txt @@ -4,8 +4,8 @@ datadog-active-directory==4.0.0; sys_platform == 'win32' datadog-activemq-xml==5.0.0 datadog-activemq==5.0.0 -datadog-aerospike==4.0.0; sys_platform == 'linux2' -datadog-airflow==6.1.0 +datadog-aerospike==4.0.1; sys_platform == 'linux2' +datadog-airflow==6.2.0 datadog-amazon-msk==6.0.0 datadog-ambari==6.0.0; sys_platform != 'win32' datadog-apache==6.0.0 @@ -27,11 +27,11 @@ datadog-cassandra==3.0.0 datadog-ceph==4.0.0; sys_platform != 'win32' datadog-cert-manager==5.1.0 datadog-checkpoint-quantum-firewall==1.0.0 -datadog-checks-base==37.2.0 +datadog-checks-base==37.3.0 datadog-checks-dependency-provider==3.0.0 datadog-checks-downloader==7.0.0 datadog-cilium==5.0.0 -datadog-cisco-aci==4.1.0 +datadog-cisco-aci==4.2.0 datadog-cisco-secure-firewall==1.0.0 datadog-citrix-hypervisor==5.0.0 datadog-clickhouse==5.1.0 @@ -44,11 +44,11 @@ datadog-coredns==5.0.0; sys_platform == 'linux2' datadog-couch==8.1.0 datadog-couchbase==5.0.0 datadog-crio==4.0.0 -datadog-datadog-cluster-agent==5.1.0 +datadog-datadog-cluster-agent==5.2.0 datadog-dcgm==3.1.0 datadog-directory==4.0.0 datadog-disk==7.0.0 -datadog-dns-check==5.0.0 +datadog-dns-check==5.0.1 datadog-dotnetclr==4.0.0; sys_platform == 'win32' datadog-druid==4.0.0 datadog-ecs-fargate==6.0.0 @@ -94,6 +94,7 @@ datadog-journald==3.0.0 datadog-kafka-consumer==6.1.0 datadog-kafka==4.0.0 datadog-karpenter==2.1.0 +datadog-keda==1.0.0 datadog-kong==5.0.0 datadog-kube-apiserver-metrics==6.0.0 datadog-kube-controller-manager==7.0.0 @@ -120,8 +121,9 @@ datadog-marklogic==6.0.0 datadog-mcache==6.0.0; sys_platform != 'win32' datadog-mesos-master==5.0.0; sys_platform != 'win32' datadog-mesos-slave==5.0.0; sys_platform != 'win32' -datadog-mongo==8.3.0 -datadog-mysql==14.3.0 +datadog-milvus==1.0.0 +datadog-mongo==8.3.1 +datadog-mysql==14.4.0 datadog-nagios==3.0.0 datadog-network==5.1.0 datadog-nfsstat==3.0.0; sys_platform == 'linux2' @@ -142,13 +144,14 @@ datadog-pgbouncer==8.0.0; sys_platform != 'win32' datadog-php-fpm==5.0.0 datadog-ping-federate==2.0.0 datadog-postfix==3.0.0; sys_platform != 'win32' -datadog-postgres==22.3.0 +datadog-postgres==22.4.0 datadog-powerdns-recursor==4.0.0 datadog-presto==3.1.0 datadog-process==5.0.0 datadog-prometheus==5.0.0 datadog-proxysql==7.0.0 datadog-pulsar==3.1.0 +datadog-quarkus==1.0.0 datadog-rabbitmq==7.0.0 datadog-ray==2.1.0 datadog-redisdb==7.0.0 @@ -167,11 +170,12 @@ datadog-solr==2.1.0 datadog-sonarqube==5.1.0 datadog-sonicwall-firewall==1.0.0 datadog-spark==6.1.0 -datadog-sqlserver==20.2.0 +datadog-sqlserver==21.0.0 datadog-squid==4.0.0 datadog-ssh-check==4.0.0 datadog-statsd==3.0.0 datadog-strimzi==3.1.0 +datadog-supabase==1.0.0 datadog-supervisord==4.0.0 datadog-suricata==2.0.0 datadog-symantec-endpoint-protection==1.0.0 diff --git a/snmp/README.md b/snmp/README.md index 805be6729159b..35ea9f7164bae 100644 --- a/snmp/README.md +++ b/snmp/README.md @@ -26,7 +26,7 @@ Additional helpful documentation, links, and articles: Need help? Contact [Datadog support][4]. 
-[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/setup [2]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ [3]: https://datadoghq.dev/integrations-core/tutorials/snmp/introduction/ [4]: https://docs.datadoghq.com/help/ diff --git a/snmp/tests/test_e2e_core_metadata.py b/snmp/tests/test_e2e_core_metadata.py index b79c75c4dd938..ab3edc0127344 100644 --- a/snmp/tests/test_e2e_core_metadata.py +++ b/snmp/tests/test_e2e_core_metadata.py @@ -1102,6 +1102,7 @@ def test_e2e_core_metadata_aos_lldp(dd_agent_check): # CHANGE topology_link1 = { 'id': device_id + ':1.216', + 'integration': 'snmp', 'source_type': 'lldp', "local": { "device": {'dd_id': device_id}, @@ -1114,6 +1115,7 @@ def test_e2e_core_metadata_aos_lldp(dd_agent_check): } topology_link2 = { 'id': device_id + ':11.217', + 'integration': 'snmp', 'source_type': 'lldp', "local": { "device": {'dd_id': device_id}, @@ -1286,6 +1288,7 @@ def test_e2e_core_metadata_cisco_cdp(dd_agent_check): topology_link1 = { 'id': device_id + ':1.5', + 'integration': 'snmp', 'source_type': 'cdp', "local": { "device": {'dd_id': device_id}, @@ -1303,6 +1306,7 @@ def test_e2e_core_metadata_cisco_cdp(dd_agent_check): } topology_link2 = { 'id': device_id + ':2.3', + 'integration': 'snmp', 'source_type': 'cdp', "local": { "device": {'dd_id': device_id}, @@ -1346,6 +1350,7 @@ def test_e2e_core_metadata_cisco_cdp_lldp(dd_agent_check): topology_link = { 'id': device_id + ':7.1', + 'integration': 'snmp', 'source_type': 'lldp', "local": { "device": {'dd_id': device_id}, diff --git a/snmp_american_power_conversion/README.md b/snmp_american_power_conversion/README.md index a4e868875c75f..d4fad09e07624 100644 --- a/snmp_american_power_conversion/README.md +++ b/snmp_american_power_conversion/README.md @@ -29,8 +29,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data/ +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_american_power_conversion/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_arista/README.md b/snmp_arista/README.md index d6037608bcd54..36cd9b99b5386 100644 --- a/snmp_arista/README.md +++ b/snmp_arista/README.md @@ -31,8 +31,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_arista/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_aruba/README.md b/snmp_aruba/README.md index 9eb7f0f348c71..a05b13a5cd981 100644 --- a/snmp_aruba/README.md +++ b/snmp_aruba/README.md @@ -31,8 +31,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: 
https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_aruba/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_chatsworth_products/README.md b/snmp_chatsworth_products/README.md index abe0cb0ee9ad1..d13955af2f451 100644 --- a/snmp_chatsworth_products/README.md +++ b/snmp_chatsworth_products/README.md @@ -33,8 +33,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_chatsworth_products/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_check_point/README.md b/snmp_check_point/README.md index 7078e73ebed35..4419c7fe3c92b 100644 --- a/snmp_check_point/README.md +++ b/snmp_check_point/README.md @@ -27,8 +27,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_check_point/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_cisco/README.md b/snmp_cisco/README.md index 9ee12eed74007..933bb3a53654d 100644 --- a/snmp_cisco/README.md +++ b/snmp_cisco/README.md @@ -48,7 +48,7 @@ Need help? Contact [Datadog support][5]. [1]: https://app.datadoghq.com/account/settings#integrations/meraki [2]: https://app.datadoghq.com/account/settings#integrations/snmp -[3]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[3]: https://docs.datadoghq.com/network_monitoring/devices/setup [4]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ [5]: https://docs.datadoghq.com/help/ [6]: https://docs.datadoghq.com/network_monitoring/devices/supported_devices/ diff --git a/snmp_dell/README.md b/snmp_dell/README.md index 66837675085bf..f180452cc8dac 100644 --- a/snmp_dell/README.md +++ b/snmp_dell/README.md @@ -42,8 +42,8 @@ Additional helpful documentation, links, and articles: Need help? Contact [Datadog support][4]. 
-[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ [4]: https://docs.datadoghq.com/help/ [5]: https://docs.datadoghq.com/network_monitoring/devices/supported_devices/ \ No newline at end of file diff --git a/snmp_fortinet/README.md b/snmp_fortinet/README.md index a25ce7deb4ed1..c6ed9f61a91cc 100644 --- a/snmp_fortinet/README.md +++ b/snmp_fortinet/README.md @@ -31,8 +31,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_fortinet/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_hewlett_packard_enterprise/README.md b/snmp_hewlett_packard_enterprise/README.md index edc99ea06b0b5..ec66d3084086d 100644 --- a/snmp_hewlett_packard_enterprise/README.md +++ b/snmp_hewlett_packard_enterprise/README.md @@ -33,8 +33,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_hewlett_packard_enterprise/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/snmp_netapp/README.md b/snmp_netapp/README.md index f27220dadb534..b159878312ce5 100644 --- a/snmp_netapp/README.md +++ b/snmp_netapp/README.md @@ -28,8 +28,8 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[1]: https://docs.datadoghq.com/network_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_monitoring/devices/setup [3]: https://github.com/DataDog/integrations-core/blob/master/snmp_netapp/assets/service_checks.json [4]: https://docs.datadoghq.com/help/ [5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ diff --git a/sqlserver/CHANGELOG.md b/sqlserver/CHANGELOG.md index 4cb8686237ec3..43f88407d0385 100644 --- a/sqlserver/CHANGELOG.md +++ b/sqlserver/CHANGELOG.md @@ -2,6 +2,18 @@ +## 21.0.0 / 2024-12-26 + +***Changed***: + +* Fall back to ``system_health/event_file`` when querying deadlocks if `datadog` XE session wasn't created. 
([#19189](https://github.com/DataDog/integrations-core/pull/19189)) + +***Added***: + +* Update configuration structure and allow configuration of all database metrics ([#19111](https://github.com/DataDog/integrations-core/pull/19111)) +* Send schema name as part of index usage metrics ([#19266](https://github.com/DataDog/integrations-core/pull/19266)) +* Add schema tag to db_fragmentation metrics for sqlserver ([#19277](https://github.com/DataDog/integrations-core/pull/19277)) + ## 20.2.0 / 2024-11-28 ***Added***: diff --git a/sqlserver/changelog.d/19111.added b/sqlserver/changelog.d/19111.added deleted file mode 100644 index c3ae6706f8277..0000000000000 --- a/sqlserver/changelog.d/19111.added +++ /dev/null @@ -1 +0,0 @@ -Update configuration structure and allow configuration of all database metrics \ No newline at end of file diff --git a/sqlserver/changelog.d/19189.changed b/sqlserver/changelog.d/19189.changed deleted file mode 100644 index c79730c4307db..0000000000000 --- a/sqlserver/changelog.d/19189.changed +++ /dev/null @@ -1 +0,0 @@ -Fall back to ``system_health/event_file`` when querying deadlocks if `datadog` XE session wasn't created. diff --git a/sqlserver/changelog.d/19266.added b/sqlserver/changelog.d/19266.added deleted file mode 100644 index fdd9c88d9bf1f..0000000000000 --- a/sqlserver/changelog.d/19266.added +++ /dev/null @@ -1 +0,0 @@ -Send schema name as part of index usage metrics diff --git a/sqlserver/changelog.d/19277.added b/sqlserver/changelog.d/19277.added deleted file mode 100644 index 527d2697cc0d2..0000000000000 --- a/sqlserver/changelog.d/19277.added +++ /dev/null @@ -1 +0,0 @@ -Add schema tag to db_fragmentation metrics for sqlserver diff --git a/sqlserver/datadog_checks/sqlserver/__about__.py b/sqlserver/datadog_checks/sqlserver/__about__.py index cc600abdac80d..6919269729939 100644 --- a/sqlserver/datadog_checks/sqlserver/__about__.py +++ b/sqlserver/datadog_checks/sqlserver/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '20.2.0' +__version__ = '21.0.0' diff --git a/supabase/CHANGELOG.md b/supabase/CHANGELOG.md new file mode 100644 index 0000000000000..2551522a7cd24 --- /dev/null +++ b/supabase/CHANGELOG.md @@ -0,0 +1,9 @@ +# CHANGELOG - supabase + + + +## 1.0.0 / 2024-12-26 + +***Added***: + +* Initial Release ([#19307](https://github.com/DataDog/integrations-core/pull/19307)) diff --git a/supabase/README.md b/supabase/README.md new file mode 100644 index 0000000000000..e7bb5b92dcea0 --- /dev/null +++ b/supabase/README.md @@ -0,0 +1,60 @@ +# Agent Check: supabase + +## Overview + +This check monitors [supabase][1] through the Datadog Agent. + +Include a high level overview of what this integration does: +- What does your product do (in 1-2 sentences)? +- What value will customers get from this integration, and why is it valuable to them? +- What specific data will your integration monitor, and what's the value of that data? + +## Setup + +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + +### Installation + +The supabase check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +1. 
+1. Edit the `supabase.d/conf.yaml` file in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Supabase performance data. See the [sample supabase.d/conf.yaml][4] for all available configuration options.
+
+2. [Restart the Agent][5].
+
+### Validation
+
+[Run the Agent's status subcommand][6] and look for `supabase` under the Checks section.
+
+## Data Collected
+
+### Metrics
+
+See [metadata.csv][7] for a list of metrics provided by this integration.
+
+### Events
+
+The Supabase integration does not include any events.
+
+### Service Checks
+
+The Supabase integration reports the health of each configured OpenMetrics endpoint.
+
+See [service_checks.json][8] for a list of service checks provided by this integration.
+
+## Troubleshooting
+
+Need help? Contact [Datadog support][9].
+
+
+[1]: https://supabase.com/
+[2]: https://app.datadoghq.com/account/settings/agent/latest
+[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/
+[4]: https://github.com/DataDog/integrations-core/blob/master/supabase/datadog_checks/supabase/data/conf.yaml.example
+[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
+[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information
+[7]: https://github.com/DataDog/integrations-core/blob/master/supabase/metadata.csv
+[8]: https://github.com/DataDog/integrations-core/blob/master/supabase/assets/service_checks.json
+[9]: https://docs.datadoghq.com/help/
diff --git a/supabase/assets/configuration/spec.yaml b/supabase/assets/configuration/spec.yaml
new file mode 100644
index 0000000000000..14ceebef476cc
--- /dev/null
+++ b/supabase/assets/configuration/spec.yaml
@@ -0,0 +1,28 @@
+name: Supabase
+files:
+- name: supabase.yaml
+  options:
+  - template: init_config
+    options:
+    - template: init_config/default
+  - template: instances
+    options:
+    - name: privileged_metrics_endpoint
+      description: |
+        Endpoint exposing Supabase customer metrics
+        https://supabase.com/docs/guides/monitoring-troubleshooting/metrics#accessing-the-metrics-endpoint
+      value:
+        display_default: null
+        example: https://<PROJECT_ID>.supabase.co/customer/v1/privileged/metrics
+        type: string
+    - name: storage_api_endpoint
+      description: |
+        Endpoint exposing the S3 Storage API Prometheus metrics.
+ value: + display_default: null + example: http://%%host%%:5000/metrics + type: string + - template: instances/openmetrics + overrides: + openmetrics_endpoint.required: false + openmetrics_endpoint.hidden: true \ No newline at end of file diff --git a/supabase/assets/dashboards/supabase_overview.json b/supabase/assets/dashboards/supabase_overview.json new file mode 100644 index 0000000000000..eac5ccdf59ebc --- /dev/null +++ b/supabase/assets/dashboards/supabase_overview.json @@ -0,0 +1,77 @@ +{ + "title": "Supabase Overview", + "description": "## Supabase\n", + "widgets": [ + { + "id": 4717263751542750, + "definition": { + "title": "", + "banner_img": "/static/images/logos/supabase_large.svg", + "show_title": true, + "type": "group", + "layout_type": "ordered", + "widgets": [ + { + "id": 5685022835071772, + "definition": { + "type": "note", + "content": "## Supabase\n", + "background_color": "white", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 0, + "width": 3, + "height": 3 + } + }, + { + "id": 8921963557059570, + "definition": { + "type": "note", + "content": "", + "background_color": "white", + "font_size": "14", + "text_align": "center", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 3 + } + } + ] + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 6 + } + } + ], + "template_variables": [ + { + "name": "host", + "prefix": "host", + "available_values": [], + "default": "*" + } + ], + "layout_type": "ordered", + "notify_list": [], + "reflow_type": "fixed" +} \ No newline at end of file diff --git a/supabase/assets/service_checks.json b/supabase/assets/service_checks.json new file mode 100644 index 0000000000000..f1b12843fedde --- /dev/null +++ b/supabase/assets/service_checks.json @@ -0,0 +1,32 @@ +[ + { + "agent_version": "7.62.0", + "integration": "Supabase", + "check": "supabase.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Supabase OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Supabase OpenMetrics endpoint, otherwise returns `OK`." + }, + { + "agent_version": "7.62.0", + "integration": "Supabase", + "check": "supabase.storage_api.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Supabase Storage API OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Supabase Storage API OpenMetrics endpoint, otherwise returns `OK`." + } +] \ No newline at end of file diff --git a/supabase/datadog_checks/__init__.py b/supabase/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/supabase/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/supabase/datadog_checks/supabase/__about__.py b/supabase/datadog_checks/supabase/__about__.py new file mode 100644 index 0000000000000..acbfd1c866b84 --- /dev/null +++ b/supabase/datadog_checks/supabase/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 
2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+__version__ = '1.0.0'
diff --git a/supabase/datadog_checks/supabase/__init__.py b/supabase/datadog_checks/supabase/__init__.py
new file mode 100644
index 0000000000000..e3ec3035e8ec2
--- /dev/null
+++ b/supabase/datadog_checks/supabase/__init__.py
@@ -0,0 +1,7 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from .__about__ import __version__
+from .check import SupabaseCheck
+
+__all__ = ['__version__', 'SupabaseCheck']
diff --git a/supabase/datadog_checks/supabase/check.py b/supabase/datadog_checks/supabase/check.py
new file mode 100644
index 0000000000000..5e1cd85b581eb
--- /dev/null
+++ b/supabase/datadog_checks/supabase/check.py
@@ -0,0 +1,52 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheckV2
+
+from .config_models import ConfigMixin
+from .metrics import RENAME_LABELS_MAP, STORAGE_API_METRICS, SUPABASE_METRICS
+
+PRIVILEGED_METRICS_NAMESPACE = 'supabase'
+STORAGE_API_METRICS_NAMESPACE = 'supabase.storage_api'
+
+
+class SupabaseCheck(OpenMetricsBaseCheckV2, ConfigMixin):
+
+    DEFAULT_METRIC_LIMIT = 0
+
+    def __init__(self, name, init_config, instances=None):
+        super(SupabaseCheck, self).__init__(name, init_config, instances)
+        # Build the scraper configurations before the base class sets up its scrapers.
+        self.check_initializations.appendleft(self.parse_config)
+
+    def parse_config(self):
+        self.scraper_configs = []
+        privileged_metrics_endpoint = self.instance.get("privileged_metrics_endpoint")
+        storage_api_endpoint = self.instance.get("storage_api_endpoint")
+
+        if not privileged_metrics_endpoint and not storage_api_endpoint:
+            raise ConfigurationError(
+                "Must specify at least one of the following: "
+                "`privileged_metrics_endpoint` or `storage_api_endpoint`."
+            )
+
+        if privileged_metrics_endpoint:
+            self.scraper_configs.append(
+                self.generate_config(privileged_metrics_endpoint, PRIVILEGED_METRICS_NAMESPACE, SUPABASE_METRICS)
+            )
+        if storage_api_endpoint:
+            self.scraper_configs.append(
+                self.generate_config(storage_api_endpoint, STORAGE_API_METRICS_NAMESPACE, STORAGE_API_METRICS)
+            )
+
+    def generate_config(self, endpoint, namespace, metrics):
+        # Each scraper gets its own endpoint, namespace, and metric map; the remaining
+        # instance options (HTTP, TLS, tags, and so on) are shared by every scraper.
+        config = {
+            'openmetrics_endpoint': endpoint,
+            'metrics': metrics,
+            'namespace': namespace,
+            'rename_labels': RENAME_LABELS_MAP,
+        }
+        config.update(self.instance)
+        return config
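The check above follows the multi-scraper OpenMetrics V2 pattern: `parse_config` is pushed to the front of the initialization queue so that each configured endpoint becomes an independent scraper with its own namespace. A minimal sketch of how one instance fans out, assuming `datadog-checks-base` and this new integration are installed in the current environment (both endpoint URLs below are placeholders, not real endpoints):

```python
# Not part of the PR: a quick way to see how a single instance fans out into
# per-endpoint scraper configs. Both endpoint URLs are placeholders.
from datadog_checks.supabase import SupabaseCheck

instance = {
    'privileged_metrics_endpoint': 'https://example.supabase.co/customer/v1/privileged/metrics',
    'storage_api_endpoint': 'http://localhost:5000/metrics',
}

check = SupabaseCheck('supabase', {}, [instance])
check.parse_config()  # normally invoked through check_initializations

for scraper in check.scraper_configs:
    # Expect 'supabase' and 'supabase.storage_api', one scraper per endpoint.
    print(scraper['namespace'], '->', scraper['openmetrics_endpoint'])
```

Removing both endpoints from the instance raises the `ConfigurationError` shown above, which is how the check enforces that at least one endpoint is configured.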
diff --git a/supabase/datadog_checks/supabase/config_models/__init__.py b/supabase/datadog_checks/supabase/config_models/__init__.py
new file mode 100644
index 0000000000000..106fff2032f68
--- /dev/null
+++ b/supabase/datadog_checks/supabase/config_models/__init__.py
@@ -0,0 +1,24 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# This file is autogenerated.
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
+#     ddev -x validate config -s <INTEGRATION_NAME>
+#     ddev -x validate models -s <INTEGRATION_NAME>
+
+from .instance import InstanceConfig
+from .shared import SharedConfig
+
+
+class ConfigMixin:
+    _config_model_instance: InstanceConfig
+    _config_model_shared: SharedConfig
+
+    @property
+    def config(self) -> InstanceConfig:
+        return self._config_model_instance
+
+    @property
+    def shared_config(self) -> SharedConfig:
+        return self._config_model_shared
diff --git a/supabase/datadog_checks/supabase/config_models/defaults.py b/supabase/datadog_checks/supabase/config_models/defaults.py
new file mode 100644
index 0000000000000..0138cd77a5ea8
--- /dev/null
+++ b/supabase/datadog_checks/supabase/config_models/defaults.py
@@ -0,0 +1,124 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# This file is autogenerated.
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
+#     ddev -x validate config -s <INTEGRATION_NAME>
+#     ddev -x validate models -s <INTEGRATION_NAME>
+
+
+def instance_allow_redirects():
+    return True
+
+
+def instance_auth_type():
+    return 'basic'
+
+
+def instance_cache_metric_wildcards():
+    return True
+
+
+def instance_cache_shared_labels():
+    return True
+
+
+def instance_collect_counters_with_distributions():
+    return False
+
+
+def instance_collect_histogram_buckets():
+    return True
+
+
+def instance_disable_generic_tags():
+    return False
+
+
+def instance_empty_default_hostname():
+    return False
+
+
+def instance_enable_health_service_check():
+    return True
+
+
+def instance_histogram_buckets_as_distributions():
+    return False
+
+
+def instance_ignore_connection_errors():
+    return False
+
+
+def instance_kerberos_auth():
+    return 'disabled'
+
+
+def instance_kerberos_delegate():
+    return False
+
+
+def instance_kerberos_force_initiate():
+    return False
+
+
+def instance_log_requests():
+    return False
+
+
+def instance_min_collection_interval():
+    return 15
+
+
+def instance_non_cumulative_histogram_buckets():
+    return False
+
+
+def instance_persist_connections():
+    return False
+
+
+def instance_request_size():
+    return 16
+
+
+def instance_skip_proxy():
+    return False
+
+
+def instance_tag_by_endpoint():
+    return True
+
+
+def instance_telemetry():
+    return False
+
+
+def instance_timeout():
+    return 10
+
+
+def instance_tls_ignore_warning():
+    return False
+
+
+def instance_tls_use_host_header():
+    return False
+
+
+def instance_tls_verify():
+    return True
+
+
+def instance_use_latest_spec():
+    return False
+
+
+def instance_use_legacy_auth_encoding():
+    return True
+
+
+def instance_use_process_start_time():
+    return False
diff --git a/supabase/datadog_checks/supabase/config_models/instance.py b/supabase/datadog_checks/supabase/config_models/instance.py
new file mode 100644
index 0000000000000..517705d04bb9f
--- /dev/null
+++ b/supabase/datadog_checks/supabase/config_models/instance.py
@@ -0,0 +1,173 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# This file is autogenerated.
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
+#     ddev -x validate config -s <INTEGRATION_NAME>
+#     ddev -x validate models -s <INTEGRATION_NAME>
+
+from __future__ import annotations
+
+from types import MappingProxyType
+from typing import Any, Optional, Union
+
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
+
+from datadog_checks.base.utils.functions import identity
+from datadog_checks.base.utils.models import validation
+
+from . import defaults, validators
+
+
+class AuthToken(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    reader: Optional[MappingProxyType[str, Any]] = None
+    writer: Optional[MappingProxyType[str, Any]] = None
+
+
+class ExtraMetrics(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra='allow',
+        frozen=True,
+    )
+    name: Optional[str] = None
+    type: Optional[str] = None
+
+
+class MetricPatterns(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    exclude: Optional[tuple[str, ...]] = None
+    include: Optional[tuple[str, ...]] = None
+
+
+class Metrics(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra='allow',
+        frozen=True,
+    )
+    name: Optional[str] = None
+    type: Optional[str] = None
+
+
+class Proxy(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    http: Optional[str] = None
+    https: Optional[str] = None
+    no_proxy: Optional[tuple[str, ...]] = None
+
+
+class ShareLabels(BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    labels: Optional[tuple[str, ...]] = None
+    match: Optional[tuple[str, ...]] = None
+
+
+class InstanceConfig(BaseModel):
+    model_config = ConfigDict(
+        validate_default=True,
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    allow_redirects: Optional[bool] = None
+    auth_token: Optional[AuthToken] = None
+    auth_type: Optional[str] = None
+    aws_host: Optional[str] = None
+    aws_region: Optional[str] = None
+    aws_service: Optional[str] = None
+    cache_metric_wildcards: Optional[bool] = None
+    cache_shared_labels: Optional[bool] = None
+    collect_counters_with_distributions: Optional[bool] = None
+    collect_histogram_buckets: Optional[bool] = None
+    connect_timeout: Optional[float] = None
+    disable_generic_tags: Optional[bool] = None
+    empty_default_hostname: Optional[bool] = None
+    enable_health_service_check: Optional[bool] = None
+    exclude_labels: Optional[tuple[str, ...]] = None
+    exclude_metrics: Optional[tuple[str, ...]] = None
+    exclude_metrics_by_labels: Optional[MappingProxyType[str, Union[bool, tuple[str, ...]]]] = None
+    extra_headers: Optional[MappingProxyType[str, Any]] = None
+    extra_metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, ExtraMetrics]]], ...]] = None
+    headers: Optional[MappingProxyType[str, Any]] = None
+    histogram_buckets_as_distributions: Optional[bool] = None
+    hostname_format: Optional[str] = None
+    hostname_label: Optional[str] = None
+    ignore_connection_errors: Optional[bool] = None
+    ignore_tags: Optional[tuple[str, ...]] = None
+    include_labels: Optional[tuple[str, ...]] = None
+    kerberos_auth: Optional[str] = None
+    kerberos_cache: Optional[str] = None
+    kerberos_delegate: Optional[bool] = None
+    kerberos_force_initiate: Optional[bool] = None
+    kerberos_hostname: Optional[str] = None
+    kerberos_keytab: Optional[str] = None
+    kerberos_principal: Optional[str] = None
+    log_requests: Optional[bool] = None
+    metric_patterns: Optional[MetricPatterns] = None
+    metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, Metrics]]], ...]] = None
+    min_collection_interval: Optional[float] = None
+    namespace: Optional[str] = Field(None, pattern='\\w*')
+    non_cumulative_histogram_buckets: Optional[bool] = None
+    ntlm_domain: Optional[str] = None
+    openmetrics_endpoint: Optional[str] = None
+    password: Optional[str] = None
+    persist_connections: Optional[bool] = None
+    privileged_metrics_endpoint: Optional[str] = None
+    proxy: Optional[Proxy] = None
+    raw_line_filters: Optional[tuple[str, ...]] = None
+    raw_metric_prefix: Optional[str] = None
+    read_timeout: Optional[float] = None
+    rename_labels: Optional[MappingProxyType[str, Any]] = None
+    request_size: Optional[float] = None
+    service: Optional[str] = None
+    share_labels: Optional[MappingProxyType[str, Union[bool, ShareLabels]]] = None
+    skip_proxy: Optional[bool] = None
+    storage_api_endpoint: Optional[str] = None
+    tag_by_endpoint: Optional[bool] = None
+    tags: Optional[tuple[str, ...]] = None
+    telemetry: Optional[bool] = None
+    timeout: Optional[float] = None
+    tls_ca_cert: Optional[str] = None
+    tls_cert: Optional[str] = None
+    tls_ignore_warning: Optional[bool] = None
+    tls_private_key: Optional[str] = None
+    tls_protocols_allowed: Optional[tuple[str, ...]] = None
+    tls_use_host_header: Optional[bool] = None
+    tls_verify: Optional[bool] = None
+    use_latest_spec: Optional[bool] = None
+    use_legacy_auth_encoding: Optional[bool] = None
+    use_process_start_time: Optional[bool] = None
+    username: Optional[str] = None
+
+    @model_validator(mode='before')
+    def _initial_validation(cls, values):
+        return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
+
+    @field_validator('*', mode='before')
+    def _validate(cls, value, info):
+        field = cls.model_fields[info.field_name]
+        field_name = field.alias or info.field_name
+        if field_name in info.context['configured_fields']:
+            value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field)
+        else:
+            value = getattr(defaults, f'instance_{info.field_name}', lambda: value)()
+
+        return validation.utils.make_immutable(value)
+
+    @model_validator(mode='after')
+    def _final_validation(cls, model):
+        return validation.core.check_model(getattr(validators, 'check_instance', identity)(model))
diff --git a/supabase/datadog_checks/supabase/config_models/shared.py b/supabase/datadog_checks/supabase/config_models/shared.py
new file mode 100644
index 0000000000000..e39d447dfc4b9
--- /dev/null
+++ b/supabase/datadog_checks/supabase/config_models/shared.py
@@ -0,0 +1,45 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# This file is autogenerated.
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
+#     ddev -x validate config -s <INTEGRATION_NAME>
+#     ddev -x validate models -s <INTEGRATION_NAME>
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel, ConfigDict, field_validator, model_validator
+
+from datadog_checks.base.utils.functions import identity
+from datadog_checks.base.utils.models import validation
+
+from . import validators
+
+
+class SharedConfig(BaseModel):
+    model_config = ConfigDict(
+        validate_default=True,
+        arbitrary_types_allowed=True,
+        frozen=True,
+    )
+    service: Optional[str] = None
+
+    @model_validator(mode='before')
+    def _initial_validation(cls, values):
+        return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values))
+
+    @field_validator('*', mode='before')
+    def _validate(cls, value, info):
+        field = cls.model_fields[info.field_name]
+        field_name = field.alias or info.field_name
+        if field_name in info.context['configured_fields']:
+            value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field)
+
+        return validation.utils.make_immutable(value)
+
+    @model_validator(mode='after')
+    def _final_validation(cls, model):
+        return validation.core.check_model(getattr(validators, 'check_shared', identity)(model))
diff --git a/supabase/datadog_checks/supabase/config_models/validators.py b/supabase/datadog_checks/supabase/config_models/validators.py
new file mode 100644
index 0000000000000..70150e85e6124
--- /dev/null
+++ b/supabase/datadog_checks/supabase/config_models/validators.py
@@ -0,0 +1,13 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+
+# Here you can include additional config validators or transformers
+#
+# def initialize_instance(values, **kwargs):
+#     if 'my_option' not in values and 'my_legacy_option' in values:
+#         values['my_option'] = values['my_legacy_option']
+#     if values.get('my_number') > 10:
+#         raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number')))
+#
+#     return values
diff --git a/supabase/datadog_checks/supabase/data/conf.yaml.example b/supabase/datadog_checks/supabase/data/conf.yaml.example
new file mode 100644
index 0000000000000..113f1e13c4d5c
--- /dev/null
+++ b/supabase/datadog_checks/supabase/data/conf.yaml.example
@@ -0,0 +1,600 @@
+## All options defined here are available to all instances.
+#
+init_config:
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Additionally, this sets the default `service` for every log source.
+    #
+    # service: <SERVICE>
+
+## Every instance is scheduled independently of the others.
+#
+instances:
+
+  -
+    ## @param privileged_metrics_endpoint - string - optional
+    ## Endpoint exposing Supabase customer metrics
+    ## https://supabase.com/docs/guides/monitoring-troubleshooting/metrics#accessing-the-metrics-endpoint
+    #
+    # privileged_metrics_endpoint: https://<PROJECT_ID>.supabase.co/customer/v1/privileged/metrics
+
+    ## @param storage_api_endpoint - string - optional
+    ## Endpoint exposing the S3 Storage API Prometheus metrics.
+    #
+    # storage_api_endpoint: http://%%host%%:5000/metrics
+
+    ## @param raw_metric_prefix - string - optional
+    ## A prefix that is removed from all exposed metric names, if present.
+    ## All configuration options will use the prefix-less name.
+    #
+    # raw_metric_prefix: <PREFIX>_
+
+    ## @param extra_metrics - (list of string or mapping) - optional
+    ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to
+    ## what the check collects by default. If the check already collects a metric, then
+    ## metric definitions here take precedence. Metrics may be defined in 3 ways:
+    ##
+    ## 1. If the item is a string, then it represents the exposed metric name, and
+    ##    the sent metric name will be identical. For example:
+    ##
+    ##      extra_metrics:
+    ##      - <METRIC_NAME>
+    ##      - <METRIC_NAME>
+    ##
+    ## 2. If the item is a mapping, then the keys represent the exposed metric names.
+    ##
+    ##    a. If a value is a string, then it represents the sent metric name. For example:
+    ##
+    ##         extra_metrics:
+    ##         - <EXPOSED_METRIC_NAME>: <SENT_METRIC_NAME>
+    ##         - <EXPOSED_METRIC_NAME>: <SENT_METRIC_NAME>
+    ##
+    ##    b. If a value is a mapping, then it must have a `name` and/or `type` key.
+    ##       The `name` represents the sent metric name, and the `type` represents how
+    ##       the metric should be handled, overriding any type information the endpoint
+    ##       may provide. For example:
+    ##
+    ##         extra_metrics:
+    ##         - <EXPOSED_METRIC_NAME>:
+    ##             name: <SENT_METRIC_NAME>
+    ##             type: <METRIC_TYPE>
+    ##         - <EXPOSED_METRIC_NAME>:
+    ##             name: <SENT_METRIC_NAME>
+    ##             type: <METRIC_TYPE>
+    ##
+    ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`.
+    ##
+    ## Note: To collect counter metrics with names ending in `_total`, specify the metric name without the `_total`
+    ## suffix. For example, to collect the counter metric `promhttp_metric_handler_requests_total`, specify
+    ## `promhttp_metric_handler_requests`. This submits to Datadog the metric name appended with `.count`.
+    ## For more information, see:
+    ## https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#suffixes
+    ##
+    ## Regular expressions may be used to match the exposed metric names, for example:
+    ##
+    ##   extra_metrics:
+    ##   - ^network_(ingress|egress)_.+
+    ##   - .+:
+    ##       type: gauge
+    #
+    # extra_metrics: []
+
+    ## @param exclude_metrics - list of strings - optional
+    ## A list of metrics to exclude, with each entry being either
+    ## the exact metric name or a regular expression.
+    ## In order to exclude all metrics but the ones matching a specific filter,
+    ## you can use a negative lookahead regex like:
+    ##   - ^(?!foo).*$
+    #
+    # exclude_metrics: []
+
+    ## @param exclude_metrics_by_labels - mapping - optional
+    ## A mapping of labels to exclude metrics with matching label name and their corresponding metric values. To match
+    ## all values of a label, set it to `true`.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    ##
+    ## For example, the following configuration instructs the check to exclude all metrics with
+    ## a label `worker` or a label `pid` with the value of either `23` or `42`.
+    ##
+    ##   exclude_metrics_by_labels:
+    ##     worker: true
+    ##     pid:
+    ##     - '23'
+    ##     - '42'
+    #
+    # exclude_metrics_by_labels: {}
+
+    ## @param exclude_labels - list of strings - optional
+    ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs.
+    ## May be used in conjunction with `include_labels`.
+    ## Labels defined in `exclude_labels` will take precedence in case of overlap.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    #
+    # exclude_labels: []
+
+    ## @param include_labels - list of strings - optional
+    ## A list of labels to include. May be used in conjunction with `exclude_labels`.
+    ## Labels defined in `exclude_labels` will take precedence in case of overlap.
+    ##
+    ## Note: Label filtering happens before `rename_labels`.
+    #
+    # include_labels: []
+
+    ## @param rename_labels - mapping - optional
+    ## A mapping of label names to their new names.
+    #
+    # rename_labels:
+    #   <LABEL_TO_RENAME>: <NEW_LABEL_NAME>
+    #   <LABEL_TO_RENAME>: <NEW_LABEL_NAME>
+
+    ## @param enable_health_service_check - boolean - optional - default: true
+    ## Whether or not to send a service check named `<NAMESPACE>.openmetrics.health` which reports
+    ## the health of the `openmetrics_endpoint`.
+    #
+    # enable_health_service_check: true
+
+    ## @param ignore_connection_errors - boolean - optional - default: false
+    ## Whether or not to ignore connection errors when scraping `openmetrics_endpoint`.
+    #
+    # ignore_connection_errors: false
+
+    ## @param hostname_label - string - optional
+    ## Override the hostname for every metric submission with the value of one of its labels.
+    #
+    # hostname_label: <HOSTNAME_LABEL>
+
+    ## @param hostname_format - string - optional
+    ## When `hostname_label` is set, this instructs the check how to format the values. The string
+    ## `<HOSTNAME>` is replaced by the value of the label defined by `hostname_label`.
+    #
+    # hostname_format: <HOSTNAME>
+
+    ## @param collect_histogram_buckets - boolean - optional - default: true
+    ## Whether or not to send histogram buckets.
+    #
+    # collect_histogram_buckets: true
+
+    ## @param non_cumulative_histogram_buckets - boolean - optional - default: false
+    ## Whether or not histogram buckets are non-cumulative and to come with a `lower_bound` tag.
+    #
+    # non_cumulative_histogram_buckets: false
+
+    ## @param histogram_buckets_as_distributions - boolean - optional - default: false
+    ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly
+    ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options.
+    ##
+    ## Learn more about distribution metrics:
+    ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types
+    #
+    # histogram_buckets_as_distributions: false
+
+    ## @param collect_counters_with_distributions - boolean - optional - default: false
+    ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count`
+    ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the
+    ## `histogram_buckets_as_distributions` option.
+    #
+    # collect_counters_with_distributions: false
+
+    ## @param use_process_start_time - boolean - optional - default: false
+    ## Whether to enable a heuristic for reporting counter values on the first scrape. When true,
+    ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero
+    ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire
+    ## response is received.
+    #
+    # use_process_start_time: false
+
+    ## @param share_labels - mapping - optional
+    ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the
+    ## exposed metrics from which to share labels, and the values are mappings that configure the
+    ## sharing behavior. Each mapping must have at least one of the following keys:
+    ##
+    ##   labels - This is a list of labels to share. All labels are shared if this is not set.
+    ##   match - This is a list of labels to match on other metrics as a condition for sharing.
+    ##   values - This is a list of allowed values as a condition for sharing.
+    ##
+    ## To unconditionally share all labels of a metric, set it to `true`.
+    ##
+    ## For example, the following configuration instructs the check to apply all labels from `metric_a`
+    ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod`
+    ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c`
+    ## to all other metrics if their value is equal to `23` or `42`.
+    ##
+    ##   share_labels:
+    ##     metric_a: true
+    ##     metric_b:
+    ##       labels:
+    ##       - node
+    ##       match:
+    ##       - pod
+    ##     metric_c:
+    ##       values:
+    ##       - 23
+    ##       - 42
+    #
+    # share_labels: {}
+
+    ## @param cache_shared_labels - boolean - optional - default: true
+    ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload
+    ## for improved performance.
+    ##
+    ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage.
+    #
+    # cache_shared_labels: true
+
+    ## @param raw_line_filters - list of strings - optional
+    ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint`
+    ## from being parsed.
+    #
+    # raw_line_filters: []
+
+    ## @param cache_metric_wildcards - boolean - optional - default: true
+    ## Whether or not to cache data from metrics that are defined by regular expressions rather
+    ## than the full metric name.
+    #
+    # cache_metric_wildcards: true
+
+    ## @param telemetry - boolean - optional - default: false
+    ## Whether or not to submit metrics prefixed by `<NAMESPACE>.telemetry.` for debugging purposes.
+    #
+    # telemetry: false
+
+    ## @param ignore_tags - list of strings - optional
+    ## A list of regular expressions used to ignore tags added by Autodiscovery and entries in the `tags` option.
+    #
+    # ignore_tags:
+    #   - <FULL:TAG>
+    #   - <TAG_PREFIX:.*>
+    #   - <.*:TAG_SUFFIX>
+
+    ## @param proxy - mapping - optional
+    ## This overrides the `proxy` setting in `init_config`.
+    ##
+    ## Set HTTP or HTTPS proxies for this instance. Use the `no_proxy` list
+    ## to specify hosts that must bypass proxies.
+    ##
+    ## The SOCKS protocol is also supported, for example:
+    ##
+    ##   socks5://user:pass@host:port
+    ##
+    ## Using the scheme `socks5` causes the DNS resolution to happen on the
+    ## client, rather than on the proxy server. This is in line with `curl`,
+    ## which uses the scheme to decide whether to do the DNS resolution on
+    ## the client or proxy. If you want to resolve the domains on the proxy
+    ## server, use `socks5h` as the scheme.
+    #
+    # proxy:
+    #   http: http://<PROXY_SERVER_FOR_HTTP>:<PORT>
+    #   https: https://<PROXY_SERVER_FOR_HTTPS>:<PORT>
+    #   no_proxy:
+    #   - <HOSTNAME_1>
+    #   - <HOSTNAME_2>
+
+    ## @param skip_proxy - boolean - optional - default: false
+    ## This overrides the `skip_proxy` setting in `init_config`.
+    ##
+    ## If set to `true`, this makes the check bypass any proxy
+    ## settings enabled and attempt to reach services directly.
+    #
+    # skip_proxy: false
+
+    ## @param auth_type - string - optional - default: basic
+    ## The type of authentication to use. The available types (and related options) are:
+    ##
+    ##   - basic
+    ##     |__ username
+    ##     |__ password
+    ##     |__ use_legacy_auth_encoding
+    ##   - digest
+    ##     |__ username
+    ##     |__ password
+    ##   - ntlm
+    ##     |__ ntlm_domain
+    ##     |__ password
+    ##   - kerberos
+    ##     |__ kerberos_auth
+    ##     |__ kerberos_cache
+    ##     |__ kerberos_delegate
+    ##     |__ kerberos_force_initiate
+    ##     |__ kerberos_hostname
+    ##     |__ kerberos_keytab
+    ##     |__ kerberos_principal
+    ##   - aws
+    ##     |__ aws_region
+    ##     |__ aws_host
+    ##     |__ aws_service
+    ##
+    ## The `aws` auth type relies on boto3 to automatically gather AWS credentials, for example: from `.aws/credentials`.
+    ## Details: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials
+    #
+    # auth_type: basic
+
+    ## @param use_legacy_auth_encoding - boolean - optional - default: true
+    ## When `auth_type` is set to `basic`, this determines whether to encode as `latin1` rather than `utf-8`.
+    #
+    # use_legacy_auth_encoding: true
+
+    ## @param username - string - optional
+    ## The username to use if services are behind basic or digest auth.
+    #
+    # username: <USERNAME>
+
+    ## @param password - string - optional
+    ## The password to use if services are behind basic or NTLM auth.
+    #
+    # password: <PASSWORD>
+
+    ## @param ntlm_domain - string - optional
+    ## If your services use NTLM authentication, specify
+    ## the domain used in the check. For NTLM Auth, append
+    ## the username to domain, not as the `username` parameter.
+    #
+    # ntlm_domain: <NTLM_DOMAIN>\<USERNAME>
+
+    ## @param kerberos_auth - string - optional - default: disabled
+    ## If your services use Kerberos authentication, you can specify the Kerberos
+    ## strategy to use between:
+    ##
+    ##   - required
+    ##   - optional
+    ##   - disabled
+    ##
+    ## See https://github.com/requests/requests-kerberos#mutual-authentication
+    #
+    # kerberos_auth: disabled
+
+    ## @param kerberos_cache - string - optional
+    ## Sets the KRB5CCNAME environment variable.
+    ## It should point to a credential cache with a valid TGT.
+    #
+    # kerberos_cache: <KRB5CCNAME>
+
+    ## @param kerberos_delegate - boolean - optional - default: false
+    ## Set to `true` to enable Kerberos delegation of credentials to a server that requests delegation.
+    ##
+    ## See https://github.com/requests/requests-kerberos#delegation
+    #
+    # kerberos_delegate: false
+
+    ## @param kerberos_force_initiate - boolean - optional - default: false
+    ## Set to `true` to preemptively initiate the Kerberos GSS exchange and
+    ## present a Kerberos ticket on the initial request (and all subsequent).
+    ##
+    ## See https://github.com/requests/requests-kerberos#preemptive-authentication
+    #
+    # kerberos_force_initiate: false
+
+    ## @param kerberos_hostname - string - optional
+    ## Override the hostname used for the Kerberos GSS exchange if its DNS name doesn't
+    ## match its Kerberos hostname, for example: behind a content switch or load balancer.
+    ##
+    ## See https://github.com/requests/requests-kerberos#hostname-override
+    #
+    # kerberos_hostname: <KERBEROS_HOSTNAME>
+
+    ## @param kerberos_principal - string - optional
+    ## Set an explicit principal, to force Kerberos to look for a
+    ## matching credential cache for the named user.
+    ##
+    ## See https://github.com/requests/requests-kerberos#explicit-principal
+    #
+    # kerberos_principal: <KERBEROS_PRINCIPAL>
+
+    ## @param kerberos_keytab - string - optional
+    ## Set the path to your Kerberos key tab file.
+    #
+    # kerberos_keytab: <KEYTAB_FILE_PATH>
+
+    ## @param auth_token - mapping - optional
+    ## This allows for the use of authentication information from dynamic sources.
+    ## Both a reader and writer must be configured.
+    ##
+    ## The available readers are:
+    ##
+    ##   - type: file
+    ##     path (required): The absolute path for the file to read from.
+    ##     pattern: A regular expression pattern with a single capture group used to find the
+    ##              token rather than using the entire file, for example: Your secret is (.+)
+    ##   - type: oauth
+    ##     url (required): The token endpoint.
+    ##     client_id (required): The client identifier.
+    ##     client_secret (required): The client secret.
+    ##     basic_auth: Whether the provider expects credentials to be transmitted in
+    ##                 an HTTP Basic Auth header. The default is: false
+    ##     options: Mapping of additional options to pass to the provider, such as the audience
+    ##              or the scope. For example:
+    ##                options:
+    ##                  audience: https://example.com
+    ##                  scope: read:example
+    ##
+    ## The available writers are:
+    ##
+    ##   - type: header
+    ##     name (required): The name of the field, for example: Authorization
+    ##     value: The template value, for example `Bearer <TOKEN>`. The default is: <TOKEN>
+    ##     placeholder: The substring in `value` to replace with the token, defaults to: <TOKEN>
+    #
+    # auth_token:
+    #   reader:
+    #     type: <READER_TYPE>
+    #     <OPTION_1>: <VALUE_1>
+    #     <OPTION_2>: <VALUE_2>
+    #   writer:
+    #     type: <WRITER_TYPE>
+    #     <OPTION_1>: <VALUE_1>
+    #     <OPTION_2>: <VALUE_2>
+
+    ## @param aws_region - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the region.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_region: <AWS_REGION>
+
+    ## @param aws_host - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the host.
+    ## This only needs the hostname and does not require the protocol (HTTP, HTTPS, and more).
+    ## For example, if connecting to https://us-east-1.amazonaws.com/, set `aws_host` to `us-east-1.amazonaws.com`.
+    ##
+    ## Note: This setting is not necessary for official integrations.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_host: <AWS_HOST>
+
+    ## @param aws_service - string - optional
+    ## If your services require AWS Signature Version 4 signing, set the service code. For a list
+    ## of available service codes, see https://docs.aws.amazon.com/general/latest/gr/rande.html
+    ##
+    ## Note: This setting is not necessary for official integrations.
+    ##
+    ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+    #
+    # aws_service: <AWS_SERVICE>
+
+    ## @param tls_verify - boolean - optional - default: true
+    ## Instructs the check to validate the TLS certificate of services.
+    #
+    # tls_verify: true
+
+    ## @param tls_use_host_header - boolean - optional - default: false
+    ## If a `Host` header is set, this enables its use for SNI (matching against the TLS certificate CN or SAN).
+    #
+    # tls_use_host_header: false
+
+    ## @param tls_ignore_warning - boolean - optional - default: false
+    ## If `tls_verify` is disabled, security warnings are logged by the check.
+    ## Disable those by setting `tls_ignore_warning` to true.
+    #
+    # tls_ignore_warning: false
+
+    ## @param tls_cert - string - optional
+    ## The path to a single file in PEM format containing a certificate as well as any
+    ## number of CA certificates needed to establish the certificate's authenticity for
+    ## use when connecting to services. It may also contain an unencrypted private key to use.
+    #
+    # tls_cert: <CERT_PATH>
+
+    ## @param tls_private_key - string - optional
+    ## The unencrypted private key to use for `tls_cert` when connecting to services. This is
+    ## required if `tls_cert` is set and it does not already contain a private key.
+    #
+    # tls_private_key: <PRIVATE_KEY_PATH>
+
+    ## @param tls_ca_cert - string - optional
+    ## The path to a file of concatenated CA certificates in PEM format or a directory
+    ## containing several CA certificates in PEM format. If a directory, the directory
+    ## must have been processed using the `openssl rehash` command. See:
+    ## https://www.openssl.org/docs/man3.2/man1/c_rehash.html
+    #
+    # tls_ca_cert: <CA_CERT_PATH>
+
+    ## @param tls_protocols_allowed - list of strings - optional
+    ## The expected versions of TLS/SSL when fetching intermediate certificates.
+    ## Only `SSLv3`, `TLSv1.2`, `TLSv1.3` are allowed by default. The possible values are:
+    ##   SSLv3
+    ##   TLSv1
+    ##   TLSv1.1
+    ##   TLSv1.2
+    ##   TLSv1.3
+    #
+    # tls_protocols_allowed:
+    #   - SSLv3
+    #   - TLSv1.2
+    #   - TLSv1.3
+
+    ## @param headers - mapping - optional
+    ## The headers parameter allows you to send specific headers with every request.
+    ## You can use it for explicitly specifying the host header or adding headers for
+    ## authorization purposes.
+    ##
+    ## This overrides any default headers.
+    #
+    # headers:
+    #   Host: <ALTERNATIVE_HOSTNAME>
+    #   X-Auth-Token: <AUTH_TOKEN>
+
+    ## @param extra_headers - mapping - optional
+    ## Additional headers to send with every request.
+    #
+    # extra_headers:
+    #   Host: <ALTERNATIVE_HOSTNAME>
+    #   X-Auth-Token: <AUTH_TOKEN>
+
+    ## @param timeout - number - optional - default: 10
+    ## The timeout for accessing services.
+    ##
+    ## This overrides the `timeout` setting in `init_config`.
+    #
+    # timeout: 10
+
+    ## @param connect_timeout - number - optional
+    ## The connect timeout for accessing services. Defaults to `timeout`.
+    #
+    # connect_timeout: <CONNECT_TIMEOUT>
+
+    ## @param read_timeout - number - optional
+    ## The read timeout for accessing services. Defaults to `timeout`.
+    #
+    # read_timeout: <READ_TIMEOUT>
+
+    ## @param request_size - number - optional - default: 16
+    ## The number of kibibytes (KiB) to read from streaming HTTP responses at a time.
+    #
+    # request_size: 16
+
+    ## @param log_requests - boolean - optional - default: false
+    ## Whether or not to debug log the HTTP(S) requests made, including the method and URL.
+    #
+    # log_requests: false
+
+    ## @param persist_connections - boolean - optional - default: false
+    ## Whether or not to persist cookies and use connection pooling for improved performance.
+    #
+    # persist_connections: false
+
+    ## @param allow_redirects - boolean - optional - default: true
+    ## Whether or not to allow URL redirection.
+    #
+    # allow_redirects: true
+
+    ## @param tags - list of strings - optional
+    ## A list of tags to attach to every metric and service check emitted by this instance.
+    ##
+    ## Learn more about tagging at https://docs.datadoghq.com/tagging
+    #
+    # tags:
+    #   - <KEY_1>:<VALUE_1>
+    #   - <KEY_2>:<VALUE_2>
+
+    ## @param service - string - optional
+    ## Attach the tag `service:<SERVICE>` to every metric, event, and service check emitted by this integration.
+    ##
+    ## Overrides any `service` defined in the `init_config` section.
+    #
+    # service: <SERVICE>
+
+    ## @param min_collection_interval - number - optional - default: 15
+    ## This changes the collection interval of the check. For more information, see:
+    ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval
+    #
+    # min_collection_interval: 15
+
+    ## @param empty_default_hostname - boolean - optional - default: false
+    ## This forces the check to send metrics with no hostname.
+    ##
+    ## This is useful for cluster-level checks.
+    #
+    # empty_default_hostname: false
+
+    ## @param metric_patterns - mapping - optional
+    ## A mapping of metrics to include or exclude, with each entry being a regular expression.
+    ##
+    ## Metrics defined in `exclude` will take precedence in case of overlap.
+    #
+    # metric_patterns:
+    #   include:
+    #   - <INCLUDE_REGEX>
+    #   exclude:
+    #   - <EXCLUDE_REGEX>
diff --git a/supabase/datadog_checks/supabase/metrics.py b/supabase/datadog_checks/supabase/metrics.py
new file mode 100644
index 0000000000000..3b772fc0f7ee3
--- /dev/null
+++ b/supabase/datadog_checks/supabase/metrics.py
@@ -0,0 +1,286 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from datadog_checks.base.checks.openmetrics.v2.metrics import DEFAULT_GO_METRICS
+
+# https://supabase.com/docs/guides/monitoring-troubleshooting/metrics
+PRIVILEGED_METRICS = {
+    'auth_users_user_count': 'auth_users.user_count',
+    'db_sql_connection_closed_max_idle_time': 'db.sql.connection_closed_max_idle_time',
+    'db_sql_connection_closed_max_idle': 'db.sql.connection_closed_max_idle',
+    'db_sql_connection_closed_max_lifetime': 'db.sql.connection_closed_max_lifetime',
+    'db_sql_connection_max_open': 'db.sql.connection_max_open',
+    'db_sql_connection_open': 'db.sql.connection_open',
+    'db_sql_connection_wait_duration_milliseconds': 'db.sql.connection_wait_duration',
+    'db_sql_connection_wait': 'db.sql.connection_wait',
+    'db_transmit_bytes': 'db.transmit_bytes',
+    'go_memstats_last_gc_time_seconds': {
+        'name': 'go.memstats.last_gc_time.seconds',
+        'type': 'time_elapsed',
+    },
+    'http_server_duration_milliseconds': 'http.server.duration',
+    'http_server_request_size_bytes': 'http.server.request.size_bytes',
+    'http_server_response_size_bytes': 'http.server.response.size_bytes',
+    'http_status_codes': 'http.status_codes',
+    'node_cpu_guest_seconds': 'node.cpu.guest_seconds',
+    'node_cpu_seconds': 'node.cpu.seconds',
+    'node_disk_discard_time_seconds': 'node.disk.discard_time_seconds',
+    'node_disk_discarded_sectors': 'node.disk.discarded_sectors',
+    'node_disk_discards_completed': 'node.disk.discards_completed',
+    'node_disk_discards_merged': 'node.disk.discards_merged',
+    'node_disk_flush_requests_time_seconds': 'node.disk.flush_requests_time_seconds',
+    'node_disk_flush_requests': 'node.disk.flush_requests',
+    'node_disk_io_now': 'node.disk.io_now',
+    'node_disk_io_time_seconds': 'node.disk.io_time_seconds',
+    'node_disk_io_time_weighted_seconds': 'node.disk.io_time_weighted_seconds',
+    'node_disk_read_bytes': 'node.disk.read_bytes',
+    'node_disk_read_time_seconds': 'node.disk.read_time_seconds',
+    'node_disk_reads_completed': 'node.disk.reads_completed',
+    'node_disk_reads_merged': 'node.disk.reads_merged',
+    'node_disk_write_time_seconds': 'node.disk.write_time_seconds',
+    'node_disk_writes_completed': 'node.disk.writes_completed',
+    'node_disk_writes_merged': 'node.disk.writes_merged',
+    'node_disk_written_bytes': 'node.disk.written_bytes',
+    'node_filesystem_avail_bytes': 'node.filesystem.available_bytes',
+    'node_filesystem_device_error': 'node.filesystem.device_error',
+    'node_filesystem_files': 'node.filesystem.files',
+    'node_filesystem_files_free': 'node.filesystem.files_free',
+    'node_filesystem_free_bytes': 'node.filesystem.free_bytes',
+    'node_filesystem_readonly': 'node.filesystem.readonly',
+    'node_filesystem_size_bytes': 'node.filesystem.size_bytes',
+    'node_load1': 'node.load1',
+    'node_load15': 'node.load15',
+    'node_load5': 'node.load5',
+    'node_memory_Active_anon_bytes': 'node.memory.active_anon_bytes',
+    'node_memory_Active_bytes': 'node.memory.active_bytes',
+    'node_memory_Active_file_bytes': 'node.memory.active_file_bytes',
+    'node_memory_AnonHugePages_bytes': 'node.memory.anon_huge_pages_bytes',
+    'node_memory_AnonPages_bytes': 'node.memory.anon_pages_bytes',
+    'node_memory_Bounce_bytes': 'node.memory.bounce_bytes',
+    'node_memory_Buffers_bytes': 'node.memory.buffers_bytes',
+    'node_memory_Cached_bytes': 'node.memory.cached_bytes',
+    'node_memory_CommitLimit_bytes': 'node.memory.commit_limit_bytes',
+    'node_memory_Committed_AS_bytes': 'node.memory.committed_as_bytes',
'node_memory_Dirty_bytes': 'node.memory.dirty_bytes', + 'node_memory_FileHugePages_bytes': 'node.memory.file_huge_pages_bytes', + 'node_memory_FilePmdMapped_bytes': 'node.memory.file_pmd_mapped_bytes', + 'node_memory_HardwareCorrupted_bytes': 'node.memory.hardware_corrupted_bytes', + 'node_memory_HugePages_Free': 'node.memory.huge_pages_free', + 'node_memory_HugePages_Rsvd': 'node.memory.huge_pages_reserved', + 'node_memory_HugePages_Surp': 'node.memory.huge_pages_surp', + 'node_memory_HugePages_Total': 'node.memory.huge_pages_total', + 'node_memory_Hugepagesize_bytes': 'node.memory.huge_page_size_bytes', + 'node_memory_Hugetlb_bytes': 'node.memory.hugetlb_bytes', + 'node_memory_Inactive_anon_bytes': 'node.memory.inactive_anon_bytes', + 'node_memory_Inactive_bytes': 'node.memory.inactive_bytes', + 'node_memory_Inactive_file_bytes': 'node.memory.inactive_file_bytes', + 'node_memory_KReclaimable_bytes': 'node.memory.kreclaimable_bytes', + 'node_memory_KernelStack_bytes': 'node.memory.kernel_stack_bytes', + 'node_memory_Mapped_bytes': 'node.memory.mapped_bytes', + 'node_memory_MemAvailable_bytes': 'node.memory.mem_available_bytes', + 'node_memory_MemFree_bytes': 'node.memory.mem_free_bytes', + 'node_memory_MemTotal_bytes': 'node.memory.mem_total_bytes', + 'node_memory_Mlocked_bytes': 'node.memory.mlocked_bytes', + 'node_memory_NFS_Unstable_bytes': 'node.memory.nfs_unstable_bytes', + 'node_memory_PageTables_bytes': 'node.memory.page_tables_bytes', + 'node_memory_Percpu_bytes': 'node.memory.percpu_bytes', + 'node_memory_SReclaimable_bytes': 'node.memory.sreclaimable_bytes', + 'node_memory_SUnreclaim_bytes': 'node.memory.sunreclaim_bytes', + 'node_memory_ShmemHugePages_bytes': 'node.memory.shmem_huge_pages_bytes', + 'node_memory_ShmemPmdMapped_bytes': 'node.memory.shmem_pmd_mapped_bytes', + 'node_memory_Shmem_bytes': 'node.memory.shmem_bytes', + 'node_memory_Slab_bytes': 'node.memory.slab_bytes', + 'node_memory_SwapCached_bytes': 'node.memory.swap_cached_bytes', + 'node_memory_SwapFree_bytes': 'node.memory.swap_free_bytes', + 'node_memory_SwapTotal_bytes': 'node.memory.swap_total_bytes', + 'node_memory_Unevictable_bytes': 'node.memory.unevictable_bytes', + 'node_memory_VmallocChunk_bytes': 'node.memory.vm_alloc_chunk_bytes', + 'node_memory_VmallocTotal_bytes': 'node.memory.vm_alloc_total_bytes', + 'node_memory_VmallocUsed_bytes': 'node.memory.vm_alloc_used_bytes', + 'node_memory_WritebackTmp_bytes': 'node.memory.writeback_tmp_bytes', + 'node_memory_Writeback_bytes': 'node.memory.writeback_bytes', + 'node_network_receive_bytes': 'node.network.receive_bytes', + 'node_network_receive_compressed': 'node.network.receive_compressed', + 'node_network_receive_drop': 'node.network.receive_drop', + 'node_network_receive_errs': 'node.network.receive_errors', + 'node_network_receive_fifo': 'node.network.receive_fifo', + 'node_network_receive_frame': 'node.network.receive_frame', + 'node_network_receive_multicast': 'node.network.receive_multicast', + 'node_network_receive_packets': 'node.network.receive_packets', + 'node_network_transmit_bytes': 'node.network.transmit_bytes', + 'node_network_transmit_carrier': 'node.network.transmit_carrier', + 'node_network_transmit_colls': 'node.network.transmit_colls', + 'node_network_transmit_compressed': 'node.network.transmit_compressed', + 'node_network_transmit_drop': 'node.network.transmit_drop', + 'node_network_transmit_errs': 'node.network.transmit_errors', + 'node_network_transmit_fifo': 'node.network.transmit_fifo', + 'node_network_transmit_packets': 
'node.network.transmit_packets', + 'node_scrape_collector_duration_seconds': 'node.scrape.collector_duration_seconds', + 'node_scrape_collector_success': 'node.scrape.collector_success', + # We force type since node.vmstat.* metrics are untyped + 'node_vmstat_oom_kill': { + 'name': 'node.vmstat.oom_kill', + 'type': 'counter', + }, + 'node_vmstat_pgfault': { + 'name': 'node.vmstat.pgfault', + 'type': 'counter', + }, + 'node_vmstat_pgmajfault': { + 'name': 'node.vmstat.pgmajfault', + 'type': 'counter', + }, + 'node_vmstat_pgpgin': { + 'name': 'node.vmstat.pgpgin', + 'type': 'counter', + }, + 'node_vmstat_pgpgout': { + 'name': 'node.vmstat.pgpgout', + 'type': 'counter', + }, + 'node_vmstat_pswpin': { + 'name': 'node.vmstat.pswpin', + 'type': 'counter', + }, + 'node_vmstat_pswpout': { + 'name': 'node.vmstat.pswpout', + 'type': 'counter', + }, + 'pg_database_size_bytes': 'pg_database_size.bytes', + 'pg_database_size_mb': 'pg_database_size.mb', + 'pg_exporter_last_scrape_duration_seconds': 'pg_exporter.last_scrape_duration_seconds', + 'pg_exporter_last_scrape_error': 'pg_exporter.last_scrape_error', + 'pg_exporter_scrapes': 'pg_exporter.scrapes', + 'pg_exporter_user_queries_load_error': 'pg_exporter.user_queries_load_error', + 'pg_ls_archive_statusdir_wal_pending_count': 'pg_ls.archive_statusdir_wal_pending_count', + 'pg_scrape_collector_duration_seconds': 'pg_scrape_collector.duration_seconds', + 'pg_scrape_collector_success': 'pg_scrape_collector.success', + 'pg_settings_default_transaction_read_only': 'pg_settings.default_transaction_read_only', + 'pg_stat_activity_xact_runtime': 'pg_stat_activity.xact_runtime', + 'pg_stat_bgwriter_buffers_alloc': 'pg_stat_bgwriter.buffers_alloc', + 'pg_stat_bgwriter_buffers_backend_fsync': 'pg_stat_bgwriter.buffers_backend_fsync', + 'pg_stat_bgwriter_buffers_backend': 'pg_stat_bgwriter.buffers_backend', + 'pg_stat_bgwriter_buffers_checkpoint': 'pg_stat_bgwriter.buffers_checkpoint', + 'pg_stat_bgwriter_buffers_clean': 'pg_stat_bgwriter.buffers_clean', + 'pg_stat_bgwriter_checkpoint_sync_time': 'pg_stat_bgwriter.checkpoint_sync_time', + 'pg_stat_bgwriter_checkpoint_write_time': 'pg_stat_bgwriter.checkpoint_write_time', + 'pg_stat_bgwriter_checkpoints_req': 'pg_stat_bgwriter.checkpoints_req', + 'pg_stat_bgwriter_checkpoints_timed': 'pg_stat_bgwriter.checkpoints_timed', + 'pg_stat_bgwriter_maxwritten_clean': 'pg_stat_bgwriter.maxwritten_clean', + 'pg_stat_bgwriter_stats_reset': 'pg_stat_bgwriter.stats_reset', + 'pg_stat_database_blks_hit': 'pg_stat_database.blks_hit', + 'pg_stat_database_blks_read': 'pg_stat_database.blks_read', + 'pg_stat_database_conflicts_confl_bufferpin': 'pg_stat_database_conflicts.confl_bufferpin', + 'pg_stat_database_conflicts_confl_deadlock': 'pg_stat_database_conflicts.confl_deadlock', + 'pg_stat_database_conflicts_confl_lock': 'pg_stat_database_conflicts.confl_lock', + 'pg_stat_database_conflicts_confl_snapshot': 'pg_stat_database_conflicts.confl_snapshot', + 'pg_stat_database_conflicts_confl_tablespace': 'pg_stat_database_conflicts.confl_tablespace', + 'pg_stat_database_conflicts': 'pg_stat_database.conflicts', + 'pg_stat_database_deadlocks': 'pg_stat_database.deadlocks', + 'pg_stat_database_most_recent_reset': 'pg_stat_database.most_recent_reset', + 'pg_stat_database_num_backends': 'pg_stat_database.num_backends', + 'pg_stat_database_temp_bytes': 'pg_stat_database.temp_bytes', + 'pg_stat_database_temp_files': 'pg_stat_database.temp_files', + 'pg_stat_database_tup_deleted': 'pg_stat_database.tup_deleted', + 
'pg_stat_database_tup_fetched': 'pg_stat_database.tup_fetched', + 'pg_stat_database_tup_inserted': 'pg_stat_database.tup_inserted', + 'pg_stat_database_tup_returned': 'pg_stat_database.tup_returned', + 'pg_stat_database_tup_updated': 'pg_stat_database.tup_updated', + 'pg_stat_database_xact_commit': 'pg_stat_database.xact_commit', + 'pg_stat_database_xact_rollback': 'pg_stat_database.xact_rollback', + 'pg_stat_replication_replay_lag': 'pg_stat_replication.replay_lag', + 'pg_stat_replication_send_lag': 'pg_stat_replication.send_lag', + 'pg_stat_statements_total_queries': 'pg_stat_statements.total_queries', + 'pg_stat_statements_total_time_seconds': 'pg_stat_statements.total_time_seconds', + 'pg_status_in_recovery': 'pg_status.in_recovery', + 'pg_up': 'pg.up', + 'pg_wal_size_mb': 'pg_wal.size', + 'pgrst_db_pool_available': 'pgrst.db_pool.available_connections', + 'pgrst_db_pool_max': 'pgrst.db_pool.max_connections', + 'pgrst_db_pool_timeouts': 'pgrst.db_pool.connection_timeouts', + 'pgrst_db_pool_waiting': 'pgrst.db_pool.requests_waiting', + 'pgrst_schema_cache_loads': 'pgrst.schema_cache.loads', + 'pgrst_schema_cache_query_time_seconds': 'pgrst.schema_cache.query_time_seconds', + 'physical_replication_lag_is_connected_to_primary': 'physical_replication_lag.is_connected_to_primary', + 'physical_replication_lag_is_wal_replay_paused': 'physical_replication_lag.is_wal_replay_paused', + 'physical_replication_lag_physical_replication_lag_seconds': 'physical_replication_lag.seconds', + 'postgres_exporter_build_info': 'postgres_exporter.build_info', + 'postgres_exporter_config_last_reload_success_timestamp_seconds': 'postgres_exporter.config_last_reload_success_timestamp_seconds', # noqa: E501 + 'postgres_exporter_config_last_reload_successful': 'postgres_exporter.config_last_reload_successful', + 'postgresql_restarts': 'postgresql.restarts', + 'process_start_time_seconds': { + 'name': 'process.start_time.seconds', + 'type': 'time_elapsed', + }, + 'process_runtime_go_mem_live_objects': 'process.runtime.go_mem_live_objects', + 'promhttp_metric_handler_requests_in_flight': 'promhttp_metric_handler.requests_in_flight', + 'promhttp_metric_handler_requests': 'promhttp_metric_handler.requests', + 'realtime_postgres_changes_client_subscriptions': 'realtime_postgres_changes.client_subscriptions', + 'realtime_postgres_changes_total_subscriptions': 'realtime_postgres_changes.total_subscriptions', + 'replication_slots_max_lag_bytes': 'pg_replication_slots.max_lag_bytes', + 'runtime_uptime_milliseconds': {'name': 'runtime.uptime_milliseconds', 'type': 'time_elapsed'}, + 'storage_storage_size_mb': 'storage.storage_size', + 'supabase_usage_metrics_user_queries': 'usage_metrics.user_queries', +} + +STORAGE_API_METRICS = [ + { + 'storage_api_upload_started': 'upload_started', + 'storage_api_upload_success': 'upload_success', + 'storage_api_database_query_performance': 'database_query_performance', + 'storage_api_queue_job_scheduled': 'queue.job_scheduled', + 'storage_api_queue_job_scheduled_time': 'queue.job_scheduled_time', + 'storage_api_queue_job_completed': 'queue.job_completed', + 'storage_api_queue_job_retry_failed': 'queue.job_retry_failed', + 'storage_api_queue_job_error': 'queue.job_error', + 'storage_api_s3_upload_part': 's3_upload_part', + 'storage_api_db_pool': 'db_pool', + 'storage_api_db_connections': 'db_connections', + 'storage_api_http_pool_busy_sockets': 'http_pool.busy_sockets', + 'storage_api_http_pool_free_sockets': 'http_pool.free_sockets', + 'storage_api_http_pool_requests': 
'http_pool.requests',
+        'storage_api_http_pool_errors': 'http_pool.errors',
+        'storage_api_http_request_summary_seconds': 'http_request.summary_seconds',
+        'storage_api_http_request_duration_seconds': 'http_request.duration_seconds',
+        'storage_api_process_cpu_seconds': 'process_cpu.seconds',
+        'storage_api_process_cpu_system_seconds': 'process_cpu.system.seconds',
+        'storage_api_process_cpu_user_seconds': 'process_cpu.user.seconds',
+        'storage_api_process_start_time_seconds': {
+            'name': 'process.uptime.seconds',
+            'type': 'time_elapsed',
+        },
+        'storage_api_process_resident_memory_bytes': 'process.resident_memory.bytes',
+        'storage_api_process_virtual_memory_bytes': 'process.virtual_memory.bytes',
+        'storage_api_process_heap_bytes': 'process.heap_bytes',
+        'storage_api_process_open_fds': 'process.open_fds',
+        'storage_api_process_max_fds': 'process.max_fds',
+        'storage_api_nodejs_eventloop_lag_seconds': 'nodejs.eventloop_lag.seconds',
+        'storage_api_nodejs_eventloop_lag_min_seconds': 'nodejs.eventloop_lag.min_seconds',
+        'storage_api_nodejs_eventloop_lag_max_seconds': 'nodejs.eventloop_lag.max_seconds',
+        'storage_api_nodejs_eventloop_lag_mean_seconds': 'nodejs.eventloop_lag.mean_seconds',
+        'storage_api_nodejs_eventloop_lag_stddev_seconds': 'nodejs.eventloop_lag.stddev_seconds',
+        'storage_api_nodejs_eventloop_lag_p50_seconds': 'nodejs.eventloop_lag.p50_seconds',
+        'storage_api_nodejs_eventloop_lag_p90_seconds': 'nodejs.eventloop_lag.p90_seconds',
+        'storage_api_nodejs_eventloop_lag_p99_seconds': 'nodejs.eventloop_lag.p99_seconds',
+        'storage_api_nodejs_active_resources': 'nodejs.active_resources',
+        'storage_api_nodejs_active_resources_total': 'nodejs.active_resources.total',
+        'storage_api_nodejs_active_handles': 'nodejs.active_handles',
+        'storage_api_nodejs_active_handles_total': 'nodejs.active_handles.total',
+        'storage_api_nodejs_active_requests': 'nodejs.active_requests',
+        'storage_api_nodejs_active_requests_total': 'nodejs.active_requests.total',
+        'storage_api_nodejs_gc_duration_seconds': 'nodejs.gc_duration.seconds',
+        'storage_api_nodejs_heap_size_total_bytes': 'nodejs.heap_size.total_bytes',
+        'storage_api_nodejs_heap_size_used_bytes': 'nodejs.heap_size.used_bytes',
+        'storage_api_nodejs_external_memory_bytes': 'nodejs.external_memory.bytes',
+        'storage_api_nodejs_heap_space_size_total_bytes': 'nodejs.heap_space_size.total_bytes',
+        'storage_api_nodejs_heap_space_size_used_bytes': 'nodejs.heap_space_size.used_bytes',
+        'storage_api_nodejs_heap_space_size_available_bytes': 'nodejs.heap_space_size.available_bytes',
+        'storage_api_nodejs_version_info': 'nodejs.version_info',
+    }
+]
+
+RENAME_LABELS_MAP = {
+    'version': 'component_version',
+}
+
+SUPABASE_METRICS = [{**DEFAULT_GO_METRICS, **PRIVILEGED_METRICS}]
diff --git a/supabase/hatch.toml b/supabase/hatch.toml
new file mode 100644
index 0000000000000..c85c5f07a7df2
--- /dev/null
+++ b/supabase/hatch.toml
@@ -0,0 +1,4 @@
+[env.collectors.datadog-checks]
+
+[[envs.default.matrix]]
+python = ["3.12"]
diff --git a/supabase/manifest.json b/supabase/manifest.json
new file mode 100644
index 0000000000000..8f0168564f6d4
--- /dev/null
+++ b/supabase/manifest.json
@@ -0,0 +1,55 @@
+{
+  "manifest_version": "2.0.0",
+  "app_uuid": "f22fec2a-ff0a-4380-8ddf-3348f1e7ff15",
+  "app_id": "supabase",
+  "display_on_public_website": false,
+  "tile": {
+    "overview": "README.md#Overview",
+    "configuration": "README.md#Setup",
+    "support": "README.md#Support",
+    "changelog": "CHANGELOG.md",
+    "description": "",
+    "title": "Supabase",
+    "media":
diff --git a/supabase/hatch.toml b/supabase/hatch.toml
new file mode 100644
index 0000000000000..c85c5f07a7df2
--- /dev/null
+++ b/supabase/hatch.toml
@@ -0,0 +1,4 @@
+[env.collectors.datadog-checks]
+
+[[envs.default.matrix]]
+python = ["3.12"]
diff --git a/supabase/manifest.json b/supabase/manifest.json
new file mode 100644
index 0000000000000..8f0168564f6d4
--- /dev/null
+++ b/supabase/manifest.json
@@ -0,0 +1,55 @@
+{
+  "manifest_version": "2.0.0",
+  "app_uuid": "f22fec2a-ff0a-4380-8ddf-3348f1e7ff15",
+  "app_id": "supabase",
+  "display_on_public_website": false,
+  "tile": {
+    "overview": "README.md#Overview",
+    "configuration": "README.md#Setup",
+    "support": "README.md#Support",
+    "changelog": "CHANGELOG.md",
+    "description": "",
+    "title": "Supabase",
+    "media": [],
+    "classifier_tags": [
+      "Supported OS::Linux",
+      "Supported OS::Windows",
+      "Supported OS::macOS",
+      "Category::Metrics",
+      "Category::Kubernetes",
+      "Category::Security",
+      "Submitted Data Type::Metrics",
+      "Offering::Integration"
+    ]
+  },
+  "assets": {
+    "integration": {
+      "auto_install": true,
+      "source_type_id": 34976974,
+      "source_type_name": "Supabase",
+      "configuration": {
+        "spec": "assets/configuration/spec.yaml"
+      },
+      "events": {
+        "creates_events": false
+      },
+      "metrics": {
+        "prefix": "supabase.",
+        "check": "supabase.pg.up",
+        "metadata_path": "metadata.csv"
+      },
+      "service_checks": {
+        "metadata_path": "assets/service_checks.json"
+      }
+    },
+    "dashboards": {
+      "Supabase Overview": "assets/dashboards/supabase_overview.json"
+    }
+  },
+  "author": {
+    "support_email": "help@datadoghq.com",
+    "name": "Datadog",
+    "homepage": "https://www.datadoghq.com",
+    "sales_email": "info@datadoghq.com"
+  }
+}
diff --git a/supabase/metadata.csv b/supabase/metadata.csv
new file mode 100644
index 0000000000000..3743e4ad09e3f
--- /dev/null
+++ b/supabase/metadata.csv
@@ -0,0 +1,268 @@
+metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags
+supabase.auth_users.user_count,gauge,,,,,0,supabase,,,
+supabase.db.sql.connection_closed_max_idle.count,count,,,,,0,supabase,,,
+supabase.db.sql.connection_closed_max_idle_time.count,count,,,,,0,supabase,,,
+supabase.db.sql.connection_closed_max_lifetime.count,count,,,,,0,supabase,,,
+supabase.db.sql.connection_max_open,gauge,,,,,0,supabase,,,
+supabase.db.sql.connection_open,gauge,,,,,0,supabase,,,
+supabase.db.sql.connection_wait.count,count,,,,,0,supabase,,,
+supabase.db.sql.connection_wait_duration.count,count,,,,,0,supabase,,,
+supabase.db.transmit_bytes.count,count,,,,,0,supabase,,,
+supabase.go.gc.duration.seconds.count,count,,,,,0,supabase,,,
+supabase.go.gc.duration.seconds.quantile,gauge,,,,,0,supabase,,,
+supabase.go.gc.duration.seconds.sum,count,,,,,0,supabase,,,
+supabase.go.goroutines,gauge,,,,,0,supabase,,,
+supabase.go.memstats.alloc_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.alloc_bytes.count,count,,,,,0,supabase,,,
+supabase.go.memstats.buck_hash.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.frees.count,count,,,,,0,supabase,,,
+supabase.go.memstats.gc.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.alloc_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.idle_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.inuse_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.objects,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.released_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.heap.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.last_gc_time.seconds,gauge,,,,,0,supabase,,,
+supabase.go.memstats.lookups.count,count,,,,,0,supabase,,,
+supabase.go.memstats.mallocs.count,count,,,,,0,supabase,,,
+supabase.go.memstats.mcache.inuse_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.mcache.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.mspan.inuse_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.mspan.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.next.gc_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.other.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.stack.inuse_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.stack.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.memstats.sys_bytes,gauge,,,,,0,supabase,,,
+supabase.go.threads,gauge,,,,,0,supabase,,,
+supabase.http.server.duration.bucket,count,,,,,0,supabase,,,
+supabase.http.server.duration.count,count,,,,,0,supabase,,,
+supabase.http.server.duration.sum,count,,,,,0,supabase,,,
+supabase.http.server.request.size_bytes.count,count,,,,,0,supabase,,,
+supabase.http.server.response.size_bytes.count,count,,,,,0,supabase,,,
+supabase.http.status_codes.count,count,,,,,0,supabase,,,
+supabase.node.cpu.guest_seconds.count,count,,,,,0,supabase,,,
+supabase.node.cpu.seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.discard_time_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.discarded_sectors.count,count,,,,,0,supabase,,,
+supabase.node.disk.discards_completed.count,count,,,,,0,supabase,,,
+supabase.node.disk.discards_merged.count,count,,,,,0,supabase,,,
+supabase.node.disk.flush_requests.count,count,,,,,0,supabase,,,
+supabase.node.disk.flush_requests_time_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.io_now,gauge,,,,,0,supabase,,,
+supabase.node.disk.io_time_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.io_time_weighted_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.read_bytes.count,count,,,,,0,supabase,,,
+supabase.node.disk.read_time_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.reads_completed.count,count,,,,,0,supabase,,,
+supabase.node.disk.reads_merged.count,count,,,,,0,supabase,,,
+supabase.node.disk.write_time_seconds.count,count,,,,,0,supabase,,,
+supabase.node.disk.writes_completed.count,count,,,,,0,supabase,,,
+supabase.node.disk.writes_merged.count,count,,,,,0,supabase,,,
+supabase.node.disk.written_bytes.count,count,,,,,0,supabase,,,
+supabase.node.filesystem.available_bytes,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.device_error,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.files,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.files_free,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.free_bytes,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.readonly,gauge,,,,,0,supabase,,,
+supabase.node.filesystem.size_bytes,gauge,,,,,0,supabase,,,
+supabase.node.load1,gauge,,,,,0,supabase,,,
+supabase.node.load15,gauge,,,,,0,supabase,,,
+supabase.node.load5,gauge,,,,,0,supabase,,,
+supabase.node.memory.active_anon_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.active_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.active_file_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.anon_huge_pages_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.anon_pages_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.bounce_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.buffers_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.cached_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.commit_limit_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.committed_as_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.dirty_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.file_huge_pages_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.file_pmd_mapped_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.hardware_corrupted_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.huge_page_size_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.huge_pages_free,gauge,,,,,0,supabase,,,
+supabase.node.memory.huge_pages_reserved,gauge,,,,,0,supabase,,,
+supabase.node.memory.huge_pages_surp,gauge,,,,,0,supabase,,,
+supabase.node.memory.huge_pages_total,gauge,,,,,0,supabase,,,
+supabase.node.memory.hugetlb_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.inactive_anon_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.inactive_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.inactive_file_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.kernel_stack_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.kreclaimable_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.mapped_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.mem_available_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.mem_free_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.mem_total_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.mlocked_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.nfs_unstable_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.page_tables_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.percpu_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.shmem_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.shmem_huge_pages_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.shmem_pmd_mapped_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.slab_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.sreclaimable_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.sunreclaim_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.swap_cached_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.swap_free_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.swap_total_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.unevictable_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.vm_alloc_chunk_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.vm_alloc_total_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.vm_alloc_used_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.writeback_bytes,gauge,,,,,0,supabase,,,
+supabase.node.memory.writeback_tmp_bytes,gauge,,,,,0,supabase,,,
+supabase.node.network.receive_bytes.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_compressed.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_drop.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_errors.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_fifo.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_frame.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_multicast.count,count,,,,,0,supabase,,,
+supabase.node.network.receive_packets.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_bytes.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_carrier.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_colls.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_compressed.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_drop.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_errors.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_fifo.count,count,,,,,0,supabase,,,
+supabase.node.network.transmit_packets.count,count,,,,,0,supabase,,,
+supabase.node.scrape.collector_duration_seconds,gauge,,,,,0,supabase,,,
+supabase.node.scrape.collector_success,gauge,,,,,0,supabase,,,
+supabase.node.vmstat.oom_kill.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pgfault.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pgmajfault.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pgpgin.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pgpgout.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pswpin.count,count,,,,,0,supabase,,,
+supabase.node.vmstat.pswpout.count,count,,,,,0,supabase,,,
+supabase.pg.up,gauge,,,,,0,supabase,,,
+supabase.pg_database_size.bytes,gauge,,,,,0,supabase,,,
+supabase.pg_database_size.mb,gauge,,,,,0,supabase,,,
+supabase.pg_exporter.last_scrape_duration_seconds,gauge,,,,,0,supabase,,,
+supabase.pg_exporter.last_scrape_error,gauge,,,,,0,supabase,,,
+supabase.pg_exporter.scrapes.count,count,,,,,0,supabase,,,
+supabase.pg_exporter.user_queries_load_error,gauge,,,,,0,supabase,,,
+supabase.pg_ls.archive_statusdir_wal_pending_count.count,count,,,,,0,supabase,,,
+supabase.pg_replication_slots.max_lag_bytes,gauge,,,,,0,supabase,,,
+supabase.pg_scrape_collector.duration_seconds,gauge,,,,,0,supabase,,,
+supabase.pg_scrape_collector.success,gauge,,,,,0,supabase,,,
+supabase.pg_settings.default_transaction_read_only,gauge,,,,,0,supabase,,,
+supabase.pg_stat_activity.xact_runtime,gauge,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.buffers_alloc.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.buffers_backend.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.buffers_backend_fsync.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.buffers_checkpoint.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.buffers_clean.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.checkpoint_sync_time.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.checkpoint_write_time.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.checkpoints_req.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.checkpoints_timed.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.maxwritten_clean.count,count,,,,,0,supabase,,,
+supabase.pg_stat_bgwriter.stats_reset.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.blks_hit.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.blks_read.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.conflicts.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.deadlocks.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.num_backends,gauge,,,,,0,supabase,,,
+supabase.pg_stat_database.temp_bytes.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.temp_files.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.tup_deleted.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.tup_fetched.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.tup_inserted.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.tup_returned.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.tup_updated.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.xact_commit.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database.xact_rollback.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database_conflicts.confl_bufferpin.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database_conflicts.confl_deadlock.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database_conflicts.confl_lock.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database_conflicts.confl_snapshot.count,count,,,,,0,supabase,,,
+supabase.pg_stat_database_conflicts.confl_tablespace.count,count,,,,,0,supabase,,,
+supabase.pg_stat_statements.total_queries.count,count,,,,,0,supabase,,,
+supabase.pg_stat_statements.total_time_seconds.count,count,,,,,0,supabase,,,
+supabase.pg_status.in_recovery,gauge,,,,,0,supabase,,,
+supabase.pg_wal.size,gauge,,,,,0,supabase,,,
+supabase.pgrst.db_pool.available_connections,gauge,,,,,0,supabase,,,
+supabase.pgrst.db_pool.connection_timeouts.count,count,,,,,0,supabase,,,
+supabase.pgrst.db_pool.max_connections,gauge,,,,,0,supabase,,,
+supabase.pgrst.db_pool.requests_waiting,gauge,,,,,0,supabase,,,
+supabase.pgrst.schema_cache.loads.count,count,,,,,0,supabase,,,
+supabase.pgrst.schema_cache.query_time_seconds,gauge,,,,,0,supabase,,,
+supabase.physical_replication_lag.is_connected_to_primary,gauge,,,,,0,supabase,,,
+supabase.physical_replication_lag.is_wal_replay_paused,gauge,,,,,0,supabase,,,
+supabase.physical_replication_lag.seconds,gauge,,,,,0,supabase,,,
+supabase.postgres_exporter.build_info,gauge,,,,,0,supabase,,,
+supabase.postgres_exporter.config_last_reload_success_timestamp_seconds,gauge,,,,,0,supabase,,,
+supabase.postgres_exporter.config_last_reload_successful,gauge,,,,,0,supabase,,,
+supabase.postgresql.restarts.count,count,,,,,0,supabase,,,
+supabase.process.cpu.seconds.count,count,,,,,0,supabase,,,
+supabase.process.max_fds,gauge,,,,,0,supabase,,,
+supabase.process.open_fds,gauge,,,,,0,supabase,,,
+supabase.process.resident_memory.bytes,gauge,,,,,0,supabase,,,
+supabase.process.runtime.go_mem_live_objects,gauge,,,,,0,supabase,,,
+supabase.process.start_time.seconds,gauge,,,,,0,supabase,,,
+supabase.process.virtual_memory.bytes,gauge,,,,,0,supabase,,,
+supabase.process.virtual_memory.max_bytes,gauge,,,,,0,supabase,,,
+supabase.promhttp_metric_handler.requests.count,count,,,,,0,supabase,,,
+supabase.promhttp_metric_handler.requests_in_flight,gauge,,,,,0,supabase,,,
+supabase.realtime_postgres_changes.client_subscriptions,gauge,,,,,0,supabase,,,
+supabase.realtime_postgres_changes.total_subscriptions,gauge,,,,,0,supabase,,,
+supabase.runtime.uptime_milliseconds,gauge,,,,,0,supabase,,,
+supabase.storage.storage_size,gauge,,,,,0,supabase,,,
+supabase.storage_api.database_query_performance.bucket,count,,,,,0,supabase,,,
+supabase.storage_api.database_query_performance.count,count,,,,,0,supabase,,,
+supabase.storage_api.database_query_performance.sum,count,,,,,0,supabase,,,
+supabase.storage_api.db_connections,gauge,,,,,0,supabase,,,
+supabase.storage_api.db_pool,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_pool.busy_sockets,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_pool.errors,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_pool.free_sockets,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_pool.requests,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_request.duration_seconds.bucket,count,,,,,0,supabase,,,
+supabase.storage_api.http_request.duration_seconds.count,count,,,,,0,supabase,,,
+supabase.storage_api.http_request.duration_seconds.sum,count,,,,,0,supabase,,,
+supabase.storage_api.http_request.summary_seconds.count,count,,,,,0,supabase,,,
+supabase.storage_api.http_request.summary_seconds.quantile,gauge,,,,,0,supabase,,,
+supabase.storage_api.http_request.summary_seconds.sum,count,,,,,0,supabase,,,
+supabase.storage_api.nodejs.active_handles,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.active_handles.total,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.active_requests.total,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.active_resources,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.active_resources.total,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.max_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.mean_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.p50_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.p90_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.p99_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.eventloop_lag.stddev_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.external_memory.bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.gc_duration.seconds.bucket,count,,,,,0,supabase,,,
+supabase.storage_api.nodejs.gc_duration.seconds.count,count,,,,,0,supabase,,,
+supabase.storage_api.nodejs.gc_duration.seconds.sum,count,,,,,0,supabase,,,
+supabase.storage_api.nodejs.heap_size.total_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.heap_size.used_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.heap_space_size.available_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.heap_space_size.total_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.heap_space_size.used_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs.version_info,gauge,,,,,0,supabase,,,
+supabase.storage_api.nodejs_eventloop_lag.min_seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.heap_bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.max_fds,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.open_fds,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.resident_memory.bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.uptime.seconds,gauge,,,,,0,supabase,,,
+supabase.storage_api.process.virtual_memory.bytes,gauge,,,,,0,supabase,,,
+supabase.storage_api.process_cpu.seconds.count,count,,,,,0,supabase,,,
+supabase.storage_api.process_cpu.system.seconds.count,count,,,,,0,supabase,,,
+supabase.storage_api.process_cpu.user.seconds.count,count,,,,,0,supabase,,,
+supabase.usage_metrics.user_queries.count,count,,,,,0,supabase,,,
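One reading note on the rows above: the `.count` suffixes are not typos relative to the rename maps in metrics.py. OpenMetrics V2 checks in integrations-core submit Prometheus counters as Datadog monotonic counts and append `.count` to the mapped name, which is why, for example, `postgresql_restarts` (mapped to `postgresql.restarts`) surfaces in metadata.csv as `supabase.postgresql.restarts.count`.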
diff --git a/supabase/pyproject.toml b/supabase/pyproject.toml
new file mode 100644
index 0000000000000..de884876b82f1
--- /dev/null
+++ b/supabase/pyproject.toml
@@ -0,0 +1,60 @@
+[build-system]
+requires = [
+    "hatchling>=0.13.0",
+]
+build-backend = "hatchling.build"
+
+[project]
+name = "datadog-supabase"
+description = "The supabase check"
+readme = "README.md"
+license = "BSD-3-Clause"
+requires-python = ">=3.12"
+keywords = [
+    "datadog",
+    "datadog agent",
+    "datadog check",
+    "supabase",
+]
+authors = [
+    { name = "Datadog", email = "packages@datadoghq.com" },
+]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Developers",
+    "Intended Audience :: System Administrators",
+    "License :: OSI Approved :: BSD License",
+    "Private :: Do Not Upload",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: System :: Monitoring",
+]
+dependencies = [
+    "datadog-checks-base>=37.0.0",
+]
+dynamic = [
+    "version",
+]
+
+[project.optional-dependencies]
+deps = []
+
+[project.urls]
+Source = "https://github.com/DataDog/integrations-core"
+
+[tool.hatch.version]
+path = "datadog_checks/supabase/__about__.py"
+
+[tool.hatch.build.targets.sdist]
+include = [
+    "/datadog_checks",
+    "/tests",
+    "/manifest.json",
+]
+
+[tool.hatch.build.targets.wheel]
+include = [
+    "/datadog_checks/supabase",
+]
+dev-mode-dirs = [
+    ".",
+]
diff --git a/supabase/tests/__init__.py b/supabase/tests/__init__.py
new file mode 100644
index 0000000000000..9103122bf028d
--- /dev/null
+++ b/supabase/tests/__init__.py
@@ -0,0 +1,3 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
diff --git a/supabase/tests/common.py b/supabase/tests/common.py
new file mode 100644
index 0000000000000..502578ef204bb
--- /dev/null
+++ b/supabase/tests/common.py
@@ -0,0 +1,315 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+import os
+
+from datadog_checks.dev import get_docker_hostname, get_here
+
+HERE = get_here()
+HOST = get_docker_hostname()
+PRIVELEGED_METRICS_PORT = 8000
+STORAGE_API_PORT = 9000
+
+
+def get_fixture_path(filename):
+    return os.path.join(HERE, 'fixtures', filename)
+
+
+MOCKED_INSTANCE = {
+    "privileged_metrics_endpoint": f"http://{HOST}:{PRIVELEGED_METRICS_PORT}/metrics",
+    "storage_api_endpoint": f"http://{HOST}:{STORAGE_API_PORT}/metrics",
+    "tags": ['test:test'],
+}
+
+COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml')
+
+PRIVILEGED_METRICS_INSTANCE = {
+    "privileged_metrics_endpoint": f"http://{HOST}:{PRIVELEGED_METRICS_PORT}/metrics",
+    "tags": ['test:test'],
+}
+
+STORAGE_API_INSTANCE = {
+    "storage_api_endpoint": f"http://{HOST}:{STORAGE_API_PORT}/metrics",
+    "tags": ['test:test'],
+}
+
+COMPOSE_FILE = os.path.join(HERE, 'docker', 'docker-compose.yaml')
+
+(
+    PRIVILEGED_METRICS_NAMESPACE,
+    STORAGE_API_METRICS_NAMESPACE,
+) = [
+    'supabase',
+    'supabase.storage_api',
+]
+
+PRIVILEGED_METRICS = [
+    'supabase.auth_users.user_count',
+    'supabase.db.sql.connection_closed_max_idle.count',
+    'supabase.db.sql.connection_closed_max_idle_time.count',
+    'supabase.db.sql.connection_closed_max_lifetime.count',
+    'supabase.db.sql.connection_max_open',
+    'supabase.db.sql.connection_open',
+    'supabase.db.sql.connection_wait.count',
+    'supabase.db.sql.connection_wait_duration.count',
+    'supabase.db.transmit_bytes.count',
+    'supabase.go.gc.duration.seconds.count',
+    'supabase.go.gc.duration.seconds.quantile',
+    'supabase.go.gc.duration.seconds.sum',
+    'supabase.go.goroutines',
+    'supabase.go.memstats.alloc_bytes',
+    'supabase.go.memstats.alloc_bytes.count',
+    'supabase.go.memstats.buck_hash.sys_bytes',
+    'supabase.go.memstats.frees.count',
+    'supabase.go.memstats.gc.sys_bytes',
+    'supabase.go.memstats.heap.alloc_bytes',
+    'supabase.go.memstats.heap.idle_bytes',
+    'supabase.go.memstats.heap.inuse_bytes',
+    'supabase.go.memstats.heap.objects',
+    'supabase.go.memstats.heap.released_bytes',
+    'supabase.go.memstats.heap.sys_bytes',
+    'supabase.go.memstats.last_gc_time.seconds',
+    'supabase.go.memstats.lookups.count',
+    'supabase.go.memstats.mallocs.count',
+    'supabase.go.memstats.mcache.inuse_bytes',
+    'supabase.go.memstats.mcache.sys_bytes',
+    'supabase.go.memstats.mspan.inuse_bytes',
+    'supabase.go.memstats.mspan.sys_bytes',
+    'supabase.go.memstats.next.gc_bytes',
+    'supabase.go.memstats.other.sys_bytes',
+    'supabase.go.memstats.stack.inuse_bytes',
+    'supabase.go.memstats.stack.sys_bytes',
+    'supabase.go.memstats.sys_bytes',
+    'supabase.go.threads',
+    'supabase.http.server.duration.bucket',
+    'supabase.http.server.duration.count',
+    'supabase.http.server.duration.sum',
+    'supabase.http.server.request.size_bytes.count',
+    'supabase.http.server.response.size_bytes.count',
+    'supabase.http.status_codes.count',
+    'supabase.node.cpu.guest_seconds.count',
+    'supabase.node.cpu.seconds.count',
+    'supabase.node.disk.discard_time_seconds.count',
+    'supabase.node.disk.discarded_sectors.count',
+    'supabase.node.disk.discards_completed.count',
+    'supabase.node.disk.discards_merged.count',
+    'supabase.node.disk.flush_requests.count',
+    'supabase.node.disk.flush_requests_time_seconds.count',
+    'supabase.node.disk.io_now',
+    'supabase.node.disk.io_time_seconds.count',
+    'supabase.node.disk.io_time_weighted_seconds.count',
+    'supabase.node.disk.read_bytes.count',
+    'supabase.node.disk.read_time_seconds.count',
+    'supabase.node.disk.reads_completed.count',
+    'supabase.node.disk.reads_merged.count',
+    'supabase.node.disk.write_time_seconds.count',
+    'supabase.node.disk.writes_completed.count',
+    'supabase.node.disk.writes_merged.count',
+    'supabase.node.disk.written_bytes.count',
+    'supabase.node.filesystem.available_bytes',
+    'supabase.node.filesystem.device_error',
+    'supabase.node.filesystem.files',
+    'supabase.node.filesystem.files_free',
+    'supabase.node.filesystem.free_bytes',
+    'supabase.node.filesystem.readonly',
+    'supabase.node.filesystem.size_bytes',
+    'supabase.node.load1',
+    'supabase.node.load15',
+    'supabase.node.load5',
+    'supabase.node.memory.active_anon_bytes',
+    'supabase.node.memory.active_bytes',
+    'supabase.node.memory.active_file_bytes',
+    'supabase.node.memory.anon_huge_pages_bytes',
+    'supabase.node.memory.anon_pages_bytes',
+    'supabase.node.memory.bounce_bytes',
+    'supabase.node.memory.buffers_bytes',
+    'supabase.node.memory.cached_bytes',
+    'supabase.node.memory.commit_limit_bytes',
+    'supabase.node.memory.committed_as_bytes',
+    'supabase.node.memory.dirty_bytes',
+    'supabase.node.memory.file_huge_pages_bytes',
+    'supabase.node.memory.file_pmd_mapped_bytes',
+    'supabase.node.memory.hardware_corrupted_bytes',
+    'supabase.node.memory.huge_page_size_bytes',
+    'supabase.node.memory.huge_pages_free',
+    'supabase.node.memory.huge_pages_reserved',
+    'supabase.node.memory.huge_pages_surp',
+    'supabase.node.memory.huge_pages_total',
+    'supabase.node.memory.hugetlb_bytes',
+    'supabase.node.memory.inactive_anon_bytes',
+    'supabase.node.memory.inactive_bytes',
+    'supabase.node.memory.inactive_file_bytes',
+    'supabase.node.memory.kernel_stack_bytes',
+    'supabase.node.memory.kreclaimable_bytes',
+    'supabase.node.memory.mapped_bytes',
+    'supabase.node.memory.mem_available_bytes',
+    'supabase.node.memory.mem_free_bytes',
+    'supabase.node.memory.mem_total_bytes',
+    'supabase.node.memory.mlocked_bytes',
+    'supabase.node.memory.nfs_unstable_bytes',
+    'supabase.node.memory.page_tables_bytes',
+    'supabase.node.memory.percpu_bytes',
+    'supabase.node.memory.shmem_bytes',
+    'supabase.node.memory.shmem_huge_pages_bytes',
+    'supabase.node.memory.shmem_pmd_mapped_bytes',
+    'supabase.node.memory.slab_bytes',
+    'supabase.node.memory.sreclaimable_bytes',
+    'supabase.node.memory.sunreclaim_bytes',
+    'supabase.node.memory.swap_cached_bytes',
+    'supabase.node.memory.swap_free_bytes',
+    'supabase.node.memory.swap_total_bytes',
+    'supabase.node.memory.unevictable_bytes',
+    'supabase.node.memory.vm_alloc_chunk_bytes',
+    'supabase.node.memory.vm_alloc_total_bytes',
+    'supabase.node.memory.vm_alloc_used_bytes',
+    'supabase.node.memory.writeback_bytes',
+    'supabase.node.memory.writeback_tmp_bytes',
+    'supabase.node.network.receive_bytes.count',
+    'supabase.node.network.receive_compressed.count',
+    'supabase.node.network.receive_drop.count',
+    'supabase.node.network.receive_errors.count',
+    'supabase.node.network.receive_fifo.count',
+    'supabase.node.network.receive_frame.count',
+    'supabase.node.network.receive_multicast.count',
+    'supabase.node.network.receive_packets.count',
+    'supabase.node.network.transmit_bytes.count',
+    'supabase.node.network.transmit_carrier.count',
+    'supabase.node.network.transmit_colls.count',
+    'supabase.node.network.transmit_compressed.count',
+    'supabase.node.network.transmit_drop.count',
+    'supabase.node.network.transmit_errors.count',
+    'supabase.node.network.transmit_fifo.count',
+    'supabase.node.network.transmit_packets.count',
+    'supabase.node.scrape.collector_duration_seconds',
+    'supabase.node.scrape.collector_success',
+    'supabase.node.vmstat.oom_kill.count',
+    'supabase.node.vmstat.pgfault.count',
+    'supabase.node.vmstat.pgmajfault.count',
+    'supabase.node.vmstat.pgpgin.count',
+    'supabase.node.vmstat.pgpgout.count',
+    'supabase.node.vmstat.pswpin.count',
+    'supabase.node.vmstat.pswpout.count',
+    'supabase.pg.up',
+    'supabase.pg_database_size.bytes',
+    'supabase.pg_database_size.mb',
+    'supabase.pg_exporter.last_scrape_duration_seconds',
+    'supabase.pg_exporter.last_scrape_error',
+    'supabase.pg_exporter.scrapes.count',
+    'supabase.pg_exporter.user_queries_load_error',
+    'supabase.pg_ls.archive_statusdir_wal_pending_count.count',
+    'supabase.pg_replication_slots.max_lag_bytes',
+    'supabase.pg_scrape_collector.duration_seconds',
+    'supabase.pg_scrape_collector.success',
+    'supabase.pg_settings.default_transaction_read_only',
+    'supabase.pg_stat_activity.xact_runtime',
+    'supabase.pg_stat_bgwriter.buffers_alloc.count',
+    'supabase.pg_stat_bgwriter.buffers_backend.count',
+    'supabase.pg_stat_bgwriter.buffers_backend_fsync.count',
+    'supabase.pg_stat_bgwriter.buffers_checkpoint.count',
+    'supabase.pg_stat_bgwriter.buffers_clean.count',
+    'supabase.pg_stat_bgwriter.checkpoint_sync_time.count',
+    'supabase.pg_stat_bgwriter.checkpoint_write_time.count',
+    'supabase.pg_stat_bgwriter.checkpoints_req.count',
+    'supabase.pg_stat_bgwriter.checkpoints_timed.count',
+    'supabase.pg_stat_bgwriter.maxwritten_clean.count',
+    'supabase.pg_stat_bgwriter.stats_reset.count',
+    'supabase.pg_stat_database.blks_hit.count',
+    'supabase.pg_stat_database.blks_read.count',
+    'supabase.pg_stat_database.conflicts.count',
+    'supabase.pg_stat_database.deadlocks.count',
+    'supabase.pg_stat_database.num_backends',
+    'supabase.pg_stat_database.temp_bytes.count',
+    'supabase.pg_stat_database.temp_files.count',
+    'supabase.pg_stat_database.tup_deleted.count',
+    'supabase.pg_stat_database.tup_fetched.count',
+    'supabase.pg_stat_database.tup_inserted.count',
+    'supabase.pg_stat_database.tup_returned.count',
+    'supabase.pg_stat_database.tup_updated.count',
+    'supabase.pg_stat_database.xact_commit.count',
+    'supabase.pg_stat_database.xact_rollback.count',
+    'supabase.pg_stat_database_conflicts.confl_bufferpin.count',
+    'supabase.pg_stat_database_conflicts.confl_deadlock.count',
+    'supabase.pg_stat_database_conflicts.confl_lock.count',
+    'supabase.pg_stat_database_conflicts.confl_snapshot.count',
+    'supabase.pg_stat_database_conflicts.confl_tablespace.count',
+    'supabase.pg_stat_statements.total_queries.count',
+    'supabase.pg_stat_statements.total_time_seconds.count',
+    'supabase.pg_status.in_recovery',
+    'supabase.pg_wal.size',
+    'supabase.pgrst.db_pool.available_connections',
+    'supabase.pgrst.db_pool.connection_timeouts.count',
+    'supabase.pgrst.db_pool.max_connections',
+    'supabase.pgrst.db_pool.requests_waiting',
+    'supabase.pgrst.schema_cache.loads.count',
+    'supabase.pgrst.schema_cache.query_time_seconds',
+    'supabase.physical_replication_lag.is_connected_to_primary',
+    'supabase.physical_replication_lag.is_wal_replay_paused',
+    'supabase.physical_replication_lag.seconds',
+    'supabase.postgres_exporter.build_info',
+    'supabase.postgres_exporter.config_last_reload_success_timestamp_seconds',
+    'supabase.postgres_exporter.config_last_reload_successful',
+    'supabase.postgresql.restarts.count',
+    'supabase.process.cpu.seconds.count',
+    'supabase.process.max_fds',
+    'supabase.process.open_fds',
+    'supabase.process.resident_memory.bytes',
+    'supabase.process.runtime.go_mem_live_objects',
+    'supabase.process.start_time.seconds',
+    'supabase.process.virtual_memory.bytes',
+    'supabase.process.virtual_memory.max_bytes',
+    'supabase.promhttp_metric_handler.requests.count',
+    'supabase.promhttp_metric_handler.requests_in_flight',
+    'supabase.realtime_postgres_changes.client_subscriptions',
+    'supabase.realtime_postgres_changes.total_subscriptions',
+    'supabase.runtime.uptime_milliseconds',
+    'supabase.usage_metrics.user_queries.count',
+]
+STORAGE_API_METRICS = [
+    'supabase.storage_api.database_query_performance.bucket',
+    'supabase.storage_api.database_query_performance.count',
+    'supabase.storage_api.database_query_performance.sum',
+    'supabase.storage_api.db_connections',
+    'supabase.storage_api.db_pool',
+    'supabase.storage_api.http_pool.busy_sockets',
+    'supabase.storage_api.http_pool.errors',
+    'supabase.storage_api.http_pool.free_sockets',
+    'supabase.storage_api.http_pool.requests',
+    'supabase.storage_api.http_request.duration_seconds.bucket',
+    'supabase.storage_api.http_request.duration_seconds.count',
+    'supabase.storage_api.http_request.duration_seconds.sum',
+    'supabase.storage_api.http_request.summary_seconds.count',
+    'supabase.storage_api.http_request.summary_seconds.quantile',
+    'supabase.storage_api.http_request.summary_seconds.sum',
+    'supabase.storage_api.nodejs.active_handles',
+    'supabase.storage_api.nodejs.active_handles.total',
+    'supabase.storage_api.nodejs.active_requests.total',
+    'supabase.storage_api.nodejs.active_resources',
+    'supabase.storage_api.nodejs.active_resources.total',
+    'supabase.storage_api.nodejs.eventloop_lag.max_seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.mean_seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.p50_seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.p90_seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.p99_seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.seconds',
+    'supabase.storage_api.nodejs.eventloop_lag.stddev_seconds',
+    'supabase.storage_api.nodejs.external_memory.bytes',
+    'supabase.storage_api.nodejs.gc_duration.seconds.bucket',
+    'supabase.storage_api.nodejs.gc_duration.seconds.count',
+    'supabase.storage_api.nodejs.gc_duration.seconds.sum',
+    'supabase.storage_api.nodejs.heap_size.total_bytes',
+    'supabase.storage_api.nodejs.heap_size.used_bytes',
+    'supabase.storage_api.nodejs.heap_space_size.available_bytes',
+    'supabase.storage_api.nodejs.heap_space_size.total_bytes',
+    'supabase.storage_api.nodejs.heap_space_size.used_bytes',
+    'supabase.storage_api.nodejs.version_info',
+    'supabase.storage_api.nodejs_eventloop_lag.min_seconds',
+    'supabase.storage_api.process.heap_bytes',
+    'supabase.storage_api.process.max_fds',
+    'supabase.storage_api.process.open_fds',
+    'supabase.storage_api.process.resident_memory.bytes',
+    'supabase.storage_api.process.uptime.seconds',
+    'supabase.storage_api.process.virtual_memory.bytes',
+    'supabase.storage_api.process_cpu.system.seconds.count',
+    'supabase.storage_api.process_cpu.user.seconds.count',
+    'supabase.storage_api.process_cpu.seconds.count',
+]
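These lists exist so the unit and e2e tests can assert coverage metric by metric. A minimal sketch of such a test, assuming the standard datadog_checks.dev pytest fixtures (the PR's actual test module is not shown in this hunk, and the SupabaseCheck import below is illustrative):

    from datadog_checks.supabase import SupabaseCheck

    from .common import MOCKED_INSTANCE, PRIVILEGED_METRICS, get_fixture_path


    def test_privileged_metrics(dd_run_check, aggregator, mock_http_response):
        # Serve the recorded Prometheus payload instead of a live endpoint.
        mock_http_response(file_path=get_fixture_path('privileged_metrics.txt'))
        check = SupabaseCheck('supabase', {}, [MOCKED_INSTANCE])
        dd_run_check(check)
        for metric in PRIVILEGED_METRICS:
            aggregator.assert_metric(metric)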
diff --git a/supabase/tests/conftest.py b/supabase/tests/conftest.py
new file mode 100644
index 0000000000000..ccaed8fc65b94
--- /dev/null
+++ b/supabase/tests/conftest.py
@@ -0,0 +1,30 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+import copy
+
+import pytest
+
+from datadog_checks.dev import docker_run
+from datadog_checks.dev.conditions import CheckDockerLogs, CheckEndpoints
+
+from .common import COMPOSE_FILE, MOCKED_INSTANCE
+
+
+@pytest.fixture(scope='session')
+def dd_environment():
+    compose_file = COMPOSE_FILE
+    conditions = [
+        CheckDockerLogs(identifier='caddy', patterns=['server running']),
+        CheckEndpoints(MOCKED_INSTANCE["privileged_metrics_endpoint"]),
+        CheckEndpoints(MOCKED_INSTANCE["storage_api_endpoint"]),
+    ]
+    with docker_run(compose_file, conditions=conditions):
+        yield {
+            'instances': [MOCKED_INSTANCE],
+        }
+
+
+@pytest.fixture
+def instance():
+    return copy.deepcopy(MOCKED_INSTANCE)
diff --git a/supabase/tests/docker/Caddyfile b/supabase/tests/docker/Caddyfile
new file mode 100644
index 0000000000000..bb31bb8c0e71e
--- /dev/null
+++ b/supabase/tests/docker/Caddyfile
@@ -0,0 +1,15 @@
+:8000 {
+    handle_path /metrics {
+        root * /usr/share/caddy
+        rewrite * /privileged_metrics
+        file_server
+    }
+}
+
+:9000 {
+    handle_path /metrics {
+        root * /usr/share/caddy
+        rewrite * /storage_api_metrics
+        file_server
+    }
+}
diff --git a/supabase/tests/docker/docker-compose.yaml b/supabase/tests/docker/docker-compose.yaml
new file mode 100644
index 0000000000000..8b0b2e5a5cb36
--- /dev/null
+++ b/supabase/tests/docker/docker-compose.yaml
@@ -0,0 +1,12 @@
+version: "3.9"
+services:
+  caddy:
+    image: caddy:2
+    container_name: caddy
+    ports:
+      - "8000:8000"
+      - "9000:9000"
+    volumes:
+      - ./Caddyfile:/etc/caddy/Caddyfile
+      - ../fixtures/privileged_metrics.txt:/usr/share/caddy/privileged_metrics
+      - ../fixtures/storage_api_metrics.txt:/usr/share/caddy/storage_api_metrics
\ No newline at end of file
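The compose file simply fronts the two recorded fixtures with Caddy, so the mock endpoints can be sanity-checked once the stack is up. A quick sketch (ports as defined in tests/common.py; the helper loop is illustrative, not part of the PR):

    import requests

    # Each port serves the corresponding fixture file at /metrics.
    for port in (8000, 9000):
        resp = requests.get(f'http://localhost:{port}/metrics', timeout=5)
        resp.raise_for_status()
        print(port, resp.text.splitlines()[0])  # first '# HELP' line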
diff --git a/supabase/tests/fixtures/privileged_metrics.txt b/supabase/tests/fixtures/privileged_metrics.txt
new file mode 100644
index 0000000000000..3499a28ff89a1
--- /dev/null
+++ b/supabase/tests/fixtures/privileged_metrics.txt
@@ -0,0 +1,924 @@
+# HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes.
+# TYPE node_memory_Inactive_file_bytes gauge
+node_memory_Inactive_file_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 7.9081472e+07
+# HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes.
+# TYPE node_memory_SwapFree_bytes gauge
+node_memory_SwapFree_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 9.3165568e+08
+# HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill.
+# TYPE node_vmstat_oom_kill untyped
+node_vmstat_oom_kill{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_load15 15m load average.
+# TYPE node_load15 gauge
+node_load15{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes.
+# TYPE node_memory_Unevictable_bytes gauge
+node_memory_Unevictable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4096
+# HELP node_network_transmit_carrier_total Network device statistic transmit_carrier.
+# TYPE node_network_transmit_carrier_total counter
+node_network_transmit_carrier_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
+# TYPE node_network_transmit_packets_total counter
+node_network_transmit_packets_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 11601
+# HELP node_memory_Dirty_bytes Memory information field Dirty_bytes.
+# TYPE node_memory_Dirty_bytes gauge
+node_memory_Dirty_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 57344
+# HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes.
+# TYPE node_memory_SwapTotal_bytes gauge
+node_memory_SwapTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.073737728e+09
+# HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes.
+# TYPE node_memory_AnonPages_bytes gauge
+node_memory_AnonPages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.29445888e+08
+# HELP node_memory_FileHugePages_bytes Memory information field FileHugePages_bytes.
+# TYPE node_memory_FileHugePages_bytes gauge
+node_memory_FileHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_HugePages_Surp Memory information field HugePages_Surp.
+# TYPE node_memory_HugePages_Surp gauge
+node_memory_HugePages_Surp{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
+# TYPE node_scrape_collector_success gauge
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="cpu"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="diskstats"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="filesystem"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="loadavg"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="meminfo"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="netdev"} 1
+node_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="vmstat"} 1
+# HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin.
+# TYPE node_vmstat_pgpgin untyped
+node_vmstat_pgpgin{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.418884e+06
+# HELP node_filesystem_device_error Whether an error occurred while getting statistics for the given device.
+# TYPE node_filesystem_device_error gauge
+node_filesystem_device_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 0
+node_filesystem_device_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 0
+# HELP node_memory_Cached_bytes Memory information field Cached_bytes.
+# TYPE node_memory_Cached_bytes gauge
+node_memory_Cached_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.67378944e+08
+# HELP node_memory_Percpu_bytes Memory information field Percpu_bytes.
+# TYPE node_memory_Percpu_bytes gauge
+node_memory_Percpu_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.138688e+06
+# HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes.
+# TYPE node_memory_SwapCached_bytes gauge
+node_memory_SwapCached_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.5106048e+07
+# HELP node_network_receive_frame_total Network device statistic receive_frame.
+# TYPE node_network_receive_frame_total counter
+node_network_receive_frame_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
+# TYPE node_scrape_collector_duration_seconds gauge
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="cpu"} 0.00023986
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="diskstats"} 0.000140044
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="filesystem"} 0.000852738
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="loadavg"} 3.9507e-05
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="meminfo"} 0.00016187
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="netdev"} 9.5664e-05
+node_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",collector="vmstat"} 0.000140988
+# HELP node_memory_ShmemHugePages_bytes Memory information field ShmemHugePages_bytes.
+# TYPE node_memory_ShmemHugePages_bytes gauge
+node_memory_ShmemHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_network_transmit_fifo_total Network device statistic transmit_fifo.
+# TYPE node_network_transmit_fifo_total counter
+node_network_transmit_fifo_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout.
+# TYPE node_vmstat_pgpgout untyped
+node_vmstat_pgpgout{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.055552e+06
+# HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes.
+# TYPE node_memory_NFS_Unstable_bytes gauge
+node_memory_NFS_Unstable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_MemFree_bytes Memory information field MemFree_bytes.
+# TYPE node_memory_MemFree_bytes gauge
+node_memory_MemFree_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.2237184e+07
+# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
+# TYPE node_network_transmit_colls_total counter
+node_network_transmit_colls_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_vmstat_pswpout /proc/vmstat information field pswpout.
+# TYPE node_vmstat_pswpout untyped
+node_vmstat_pswpout{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 88387
+# HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests.
+# TYPE node_disk_flush_requests_time_seconds_total counter
+node_disk_flush_requests_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_flush_requests_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_disk_written_bytes_total The total number of bytes written successfully.
+# TYPE node_disk_written_bytes_total counter
+node_disk_written_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 1.005322752e+09
+node_disk_written_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 7.5563008e+07
+# HELP node_network_receive_errs_total Network device statistic receive_errs.
+# TYPE node_network_receive_errs_total counter
+node_network_receive_errs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os.
+# TYPE node_disk_io_time_weighted_seconds_total counter
+node_disk_io_time_weighted_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 172.749
+node_disk_io_time_weighted_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 7.019
+# HELP node_filesystem_avail_bytes Filesystem space available to non-root users in bytes.
+# TYPE node_filesystem_avail_bytes gauge
+node_filesystem_avail_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 5.318856704e+09
+node_filesystem_avail_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.064999424e+09
+# HELP node_memory_Mapped_bytes Memory information field Mapped_bytes.
+# TYPE node_memory_Mapped_bytes gauge
+node_memory_Mapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.0776576e+08
+# HELP postgresql_restarts_total Number of times postgresql has been restarted
+# TYPE postgresql_restarts_total counter
+postgresql_restarts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_disk_io_now The number of I/Os currently in progress.
+# TYPE node_disk_io_now gauge
+node_disk_io_now{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_io_now{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes.
+# TYPE node_memory_SReclaimable_bytes gauge
+node_memory_SReclaimable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.7298176e+07
+# HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes.
+# TYPE node_memory_KernelStack_bytes gauge
+node_memory_KernelStack_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.670016e+06
+# HELP node_memory_FilePmdMapped_bytes Memory information field FilePmdMapped_bytes.
+# TYPE node_memory_FilePmdMapped_bytes gauge
+node_memory_FilePmdMapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_network_receive_drop_total Network device statistic receive_drop.
+# TYPE node_network_receive_drop_total counter
+node_network_receive_drop_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_memory_HugePages_Total Memory information field HugePages_Total.
+# TYPE node_memory_HugePages_Total gauge
+node_memory_HugePages_Total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_filesystem_size_bytes Filesystem size in bytes.
+# TYPE node_filesystem_size_bytes gauge
+node_filesystem_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 1.0359754752e+10
+node_filesystem_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.350298112e+09
+# HELP node_load5 5m load average.
+# TYPE node_load5 gauge
+node_load5{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Inactive_bytes Memory information field Inactive_bytes.
+# TYPE node_memory_Inactive_bytes gauge
+node_memory_Inactive_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.72179456e+08
+# HELP node_memory_PageTables_bytes Memory information field PageTables_bytes.
+# TYPE node_memory_PageTables_bytes gauge
+node_memory_PageTables_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 6.483968e+06
+# HELP node_network_receive_compressed_total Network device statistic receive_compressed.
+# TYPE node_network_receive_compressed_total counter
+node_network_receive_compressed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_disk_flush_requests_total The total number of flush requests completed successfully
+# TYPE node_disk_flush_requests_total counter
+node_disk_flush_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_flush_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully.
+# TYPE node_disk_discarded_sectors_total counter
+node_disk_discarded_sectors_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_discarded_sectors_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes.
+# TYPE node_memory_AnonHugePages_bytes gauge
+node_memory_AnonHugePages_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Bounce_bytes Memory information field Bounce_bytes.
+# TYPE node_memory_Bounce_bytes gauge
+node_memory_Bounce_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes.
+# TYPE node_memory_MemTotal_bytes gauge
+node_memory_MemTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4.43674624e+08
+# HELP node_network_receive_fifo_total Network device statistic receive_fifo.
+# TYPE node_network_receive_fifo_total counter
+node_network_receive_fifo_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_cpu_seconds_total Seconds the CPUs spent in each mode.
+# TYPE node_cpu_seconds_total counter
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="idle"} 2263.65
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="iowait"} 33.6
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="irq"} 0
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="nice"} 0.04
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="softirq"} 0.37
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="steal"} 1.45
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="system"} 13.61
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="user"} 27.02
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="idle"} 2265.34
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="iowait"} 32.49
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="irq"} 0
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="nice"} 0.06
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="softirq"} 0.3
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="steal"} 1.61
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="system"} 12.2
+node_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="user"} 27.08
+# HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes.
+# TYPE node_memory_VmallocUsed_bytes gauge
+node_memory_VmallocUsed_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.0760192e+07
+# HELP node_memory_Writeback_bytes Memory information field Writeback_bytes.
+# TYPE node_memory_Writeback_bytes gauge
+node_memory_Writeback_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes.
+# TYPE node_memory_Inactive_anon_bytes gauge
+node_memory_Inactive_anon_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 9.3097984e+07
+# HELP node_memory_Active_bytes Memory information field Active_bytes.
+# TYPE node_memory_Active_bytes gauge
+node_memory_Active_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.42127104e+08
+# HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes.
+# TYPE node_memory_CommitLimit_bytes gauge
+node_memory_CommitLimit_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.295572992e+09
+# HELP node_network_receive_packets_total Network device statistic receive_packets.
+# TYPE node_network_receive_packets_total counter
+node_network_receive_packets_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 33418
+# HELP node_network_transmit_compressed_total Network device statistic transmit_compressed.
+# TYPE node_network_transmit_compressed_total counter
+node_network_transmit_compressed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_disk_writes_merged_total The number of writes merged.
+# TYPE node_disk_writes_merged_total counter
+node_disk_writes_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 88093
+node_disk_writes_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3408
+# HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes.
+# TYPE node_memory_Active_anon_bytes gauge
+node_memory_Active_anon_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 5.8032128e+07
+# HELP node_disk_reads_completed_total The total number of reads completed successfully.
+# TYPE node_disk_reads_completed_total counter
+node_disk_reads_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 88478
+node_disk_reads_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 2670
+# HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards.
+# TYPE node_disk_discard_time_seconds_total counter
+node_disk_discard_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_discard_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_disk_reads_merged_total The total number of reads merged.
+# TYPE node_disk_reads_merged_total counter
+node_disk_reads_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 30022
+node_disk_reads_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 342
+# HELP node_filesystem_files_free Filesystem total free file nodes.
+# TYPE node_filesystem_files_free gauge
+node_filesystem_files_free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 466013
+node_filesystem_files_free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 523105
+# HELP node_memory_HugePages_Free Memory information field HugePages_Free.
+# TYPE node_memory_HugePages_Free gauge
+node_memory_HugePages_Free{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP db_transmit_bytes postgres and pgbouncer network transmit bytes
+# TYPE db_transmit_bytes counter
+db_transmit_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 89799
+# HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes.
+# TYPE node_memory_VmallocTotal_bytes gauge
+node_memory_VmallocTotal_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.3633903919104e+14
+# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
+# TYPE node_network_transmit_bytes_total counter
+node_network_transmit_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 2.726347e+06
+# HELP node_load1 1m load average.
+# TYPE node_load1 gauge
+node_load1{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_network_receive_multicast_total Network device statistic receive_multicast.
+# TYPE node_network_receive_multicast_total counter
+node_network_receive_multicast_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_disk_discards_merged_total The total number of discards merged.
+# TYPE node_disk_discards_merged_total counter
+node_disk_discards_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0
+node_disk_discards_merged_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0
+# HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes.
+# TYPE node_memory_Committed_AS_bytes gauge
+node_memory_Committed_AS_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.148878848e+09
+# HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes.
+# TYPE node_memory_VmallocChunk_bytes gauge
+node_memory_VmallocChunk_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_filesystem_free_bytes Filesystem free space in bytes.
+# TYPE node_filesystem_free_bytes gauge
+node_filesystem_free_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 5.34626304e+09
+node_filesystem_free_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 8.18663424e+09
+# HELP node_disk_writes_completed_total The total number of writes completed successfully.
+# TYPE node_disk_writes_completed_total counter
+node_disk_writes_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 23492
+node_disk_writes_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3503
+# HELP node_memory_ShmemPmdMapped_bytes Memory information field ShmemPmdMapped_bytes.
+# TYPE node_memory_ShmemPmdMapped_bytes gauge
+node_memory_ShmemPmdMapped_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
+# TYPE node_network_transmit_drop_total counter
+node_network_transmit_drop_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
+# TYPE node_network_transmit_errs_total counter
+node_network_transmit_errs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 0
+# HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os.
+# TYPE node_disk_io_time_seconds_total counter
+node_disk_io_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 81.48
+node_disk_io_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 6.908
+# HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes.
+# TYPE node_memory_SUnreclaim_bytes gauge
+node_memory_SUnreclaim_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.5139584e+07
+# HELP node_vmstat_pswpin /proc/vmstat information field pswpin.
+# TYPE node_vmstat_pswpin untyped
+node_vmstat_pswpin{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 43053
+# HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads.
+# TYPE node_disk_read_time_seconds_total counter
+node_disk_read_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 120.031
+node_disk_read_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3.342
+# HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault.
+# TYPE node_vmstat_pgmajfault untyped
+node_vmstat_pgmajfault{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 40869
+# HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes.
+# TYPE node_disk_write_time_seconds_total counter
+node_disk_write_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 52.717
+node_disk_write_time_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 3.676
+# HELP node_memory_Hugetlb_bytes Memory information field Hugetlb_bytes.
+# TYPE node_memory_Hugetlb_bytes gauge
+node_memory_Hugetlb_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_MemAvailable_bytes Memory information field MemAvailable_bytes.
+# TYPE node_memory_MemAvailable_bytes gauge
+node_memory_MemAvailable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.1239808e+08
+# HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes.
+# TYPE node_memory_WritebackTmp_bytes gauge
+node_memory_WritebackTmp_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Buffers_bytes Memory information field Buffers_bytes.
+# TYPE node_memory_Buffers_bytes gauge
+node_memory_Buffers_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 8.97024e+06
+# HELP node_disk_read_bytes_total The total number of bytes read successfully.
+# TYPE node_disk_read_bytes_total counter
+node_disk_read_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 2.429791232e+09
+node_disk_read_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 4.7137792e+07
+# HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes.
+# TYPE node_memory_HardwareCorrupted_bytes gauge
+node_memory_HardwareCorrupted_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0
+# HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes.
+# TYPE node_memory_Hugepagesize_bytes gauge
+node_memory_Hugepagesize_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.097152e+06
+# HELP node_memory_KReclaimable_bytes Memory information field KReclaimable_bytes.
+# TYPE node_memory_KReclaimable_bytes gauge
+node_memory_KReclaimable_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 3.7298176e+07
+# HELP node_memory_Shmem_bytes Memory information field Shmem_bytes.
+# TYPE node_memory_Shmem_bytes gauge
+node_memory_Shmem_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 1.3119488e+07
+# HELP node_network_receive_bytes_total Network device statistic receive_bytes.
+# TYPE node_network_receive_bytes_total counter
+node_network_receive_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="ens5"} 4.0247816e+07
+# HELP node_disk_discards_completed_total The total number of discards completed successfully.
+# TYPE node_disk_discards_completed_total counter +node_disk_discards_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme0n1"} 0 +node_disk_discards_completed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="nvme1n1"} 0 +# HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. +# TYPE node_memory_HugePages_Rsvd gauge +node_memory_HugePages_Rsvd{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 0 +# HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. +# TYPE node_memory_Mlocked_bytes gauge +node_memory_Mlocked_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 4096 +# HELP node_filesystem_files Filesystem total file nodes. +# TYPE node_filesystem_files gauge +node_filesystem_files{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 648960 +node_filesystem_files{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 524288 +# HELP node_filesystem_readonly Filesystem read-only status. +# TYPE node_filesystem_readonly gauge +node_filesystem_readonly{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme0n1p2",fstype="ext4",mountpoint="/"} 0 +node_filesystem_readonly{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",device="/dev/nvme1n1",fstype="ext4",mountpoint="/data"} 0 +# HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. +# TYPE node_memory_Active_file_bytes gauge +node_memory_Active_file_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 8.4094976e+07 +# HELP node_memory_Slab_bytes Memory information field Slab_bytes. +# TYPE node_memory_Slab_bytes gauge +node_memory_Slab_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 7.243776e+07 +# HELP node_vmstat_pgfault /proc/vmstat information field pgfault. +# TYPE node_vmstat_pgfault untyped +node_vmstat_pgfault{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db"} 2.18115e+06 +# HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. +# TYPE node_cpu_guest_seconds_total counter +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="nice"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="0",mode="user"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="nice"} 0 +node_cpu_guest_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="db",cpu="1",mode="user"} 0 +# HELP pg_stat_database_xact_commit_total Transactions committed +# TYPE pg_stat_database_xact_commit_total counter +pg_stat_database_xact_commit_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 2472 +# HELP physical_replication_lag_is_wal_replay_paused Check if WAL replay has been paused +# TYPE physical_replication_lag_is_wal_replay_paused gauge +physical_replication_lag_is_wal_replay_paused{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP postgres_exporter_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. 
+# TYPE postgres_exporter_config_last_reload_success_timestamp_seconds gauge +postgres_exporter_config_last_reload_success_timestamp_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.266716672e+09 +# HELP realtime_postgres_changes_total_subscriptions Total subscription records listening for Postgres changes +# TYPE realtime_postgres_changes_total_subscriptions gauge +realtime_postgres_changes_total_subscriptions{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_checkpoint_sync_time_total Time spent synchronizing checkpoint files to disk +# TYPE pg_stat_bgwriter_checkpoint_sync_time_total counter +pg_stat_bgwriter_checkpoint_sync_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 67 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2400 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 15600 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.73323902162e+09 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.7332411937058074e+09 +# HELP pg_stat_bgwriter_buffers_alloc_total Buffers allocated +# TYPE pg_stat_bgwriter_buffers_alloc_total counter +pg_stat_bgwriter_buffers_alloc_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 12444 +# HELP pg_stat_bgwriter_buffers_backend_fsync_total fsync calls executed by a backend directly +# TYPE pg_stat_bgwriter_buffers_backend_fsync_total counter +pg_stat_bgwriter_buffers_backend_fsync_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_statements_total_time_seconds Total time spent, in seconds +# TYPE pg_stat_statements_total_time_seconds counter +pg_stat_statements_total_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 3.2134043250000004 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.27 +# HELP auth_users_user_count Number of users in the project db +# TYPE auth_users_user_count gauge +auth_users_user_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_xact_rollback_total Transactions rolled back +# TYPE pg_stat_database_xact_rollback_total counter +pg_stat_database_xact_rollback_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 5 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",version="go1.21.3"} 1 +# HELP pg_exporter_user_queries_load_error Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success). +# TYPE pg_exporter_user_queries_load_error gauge +pg_exporter_user_queries_load_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",filename="/opt/postgres_exporter/queries.yml",hashsum="0af5dd9317f7a5209580dc72ebbb795a1468c3e4cf68eabd2e423388bcf2546f"} 0 +# HELP pg_stat_bgwriter_checkpoints_timed_total Scheduled checkpoints performed +# TYPE pg_stat_bgwriter_checkpoints_timed_total counter +pg_stat_bgwriter_checkpoints_timed_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 7 +# HELP postgres_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which postgres_exporter was built, and the goos and goarch for the build. +# TYPE postgres_exporter_build_info gauge +postgres_exporter_build_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",branch="HEAD",goarch="arm64",goos="linux",goversion="go1.21.3",revision="68c176b8833b7580bf847cecf60f8e0ad5923f9a",tags="unknown",version="0.15.0"} 1 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 86197 +# HELP pg_exporter_last_scrape_error Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success). +# TYPE pg_exporter_last_scrape_error gauge +pg_exporter_last_scrape_error{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP pg_exporter_scrapes_total Total number of times PostgreSQL was scraped for metrics. +# TYPE pg_exporter_scrapes_total counter +pg_exporter_scrapes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 38 +# HELP pg_stat_bgwriter_stats_reset Most recent stat reset time +# TYPE pg_stat_bgwriter_stats_reset counter +pg_stat_bgwriter_stats_reset{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1.732551629e+09 +# HELP pg_status_in_recovery Database in recovery +# TYPE pg_status_in_recovery gauge +pg_status_in_recovery{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP postgres_exporter_config_last_reload_successful Postgres exporter config loaded successfully. +# TYPE postgres_exporter_config_last_reload_successful gauge +postgres_exporter_config_last_reload_successful{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP go_memstats_mallocs_total Total number of mallocs. 
+# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 91002 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 527816 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 5 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="200"} 37 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="500"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",code="503"} 0 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 65184 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.2819472e+07 +# HELP pg_stat_database_blks_read_total Number of disk blocks read +# TYPE pg_stat_database_blks_read_total counter +pg_stat_database_blks_read_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 2465 +# HELP pg_stat_database_conflicts_confl_lock_total Queries cancelled due to lock timeouts +# TYPE pg_stat_database_conflicts_confl_lock_total counter +pg_stat_database_conflicts_confl_lock_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 5048 +# HELP pg_scrape_collector_duration_seconds postgres_exporter: Duration of a collector scrape. +# TYPE pg_scrape_collector_duration_seconds gauge +pg_scrape_collector_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",collector="database"} 0.003614192 +# HELP pg_stat_bgwriter_checkpoints_req_total Requested checkpoints performed +# TYPE pg_stat_bgwriter_checkpoints_req_total counter +pg_stat_bgwriter_checkpoints_req_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 6 +# HELP pg_stat_database_most_recent_reset The most recent time one of the databases had its statistics reset +# TYPE pg_stat_database_most_recent_reset counter +pg_stat_database_most_recent_reset{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
+# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 81480 +# HELP storage_storage_size_mb The total size used for all storage buckets, in mb +# TYPE storage_storage_size_mb gauge +storage_storage_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 12 +# HELP physical_replication_lag_is_connected_to_primary Monitor connection to the primary database +# TYPE physical_replication_lag_is_connected_to_primary gauge +physical_replication_lag_is_connected_to_primary{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2.412352e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.80092e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4805 +# HELP pg_ls_archive_statusdir_wal_pending_count Number of not yet archived WAL files +# TYPE pg_ls_archive_statusdir_wal_pending_count counter +pg_ls_archive_statusdir_wal_pending_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_temp_files_total Temp files created by queries +# TYPE pg_stat_database_temp_files_total counter +pg_stat_database_temp_files_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0"} 1.9857e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.25"} 5.2274e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.5"} 9.5878e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="0.75"} 0.000117195 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",quantile="1"} 0.041046092 +go_gc_duration_seconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.055028914 +go_gc_duration_seconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 16 +# HELP pg_stat_database_conflicts_confl_tablespace_total Queries cancelled due to dropped tablespaces +# TYPE pg_stat_database_conflicts_confl_tablespace_total counter +pg_stat_database_conflicts_confl_tablespace_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_maxwritten_clean_total Number of times bg writer stopped a cleaning scan because it had written too many buffers +# TYPE pg_stat_bgwriter_maxwritten_clean_total counter +pg_stat_bgwriter_maxwritten_clean_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_checkpoint_write_time_total Time spent writing checkpoint files to disk +# TYPE pg_stat_bgwriter_checkpoint_write_time_total counter +pg_stat_bgwriter_checkpoint_write_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 17715 +# HELP pg_stat_database_tup_deleted_total Rows deleted +# TYPE pg_stat_database_tup_deleted_total counter +pg_stat_database_tup_deleted_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1436 +# HELP physical_replication_lag_physical_replication_lag_seconds Physical replication lag in seconds +# TYPE physical_replication_lag_physical_replication_lag_seconds gauge +physical_replication_lag_physical_replication_lag_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_buffers_clean_total Buffers written by bg writter +# TYPE pg_stat_bgwriter_buffers_clean_total counter +pg_stat_bgwriter_buffers_clean_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_database_size_bytes Disk space used by the database +# TYPE pg_database_size_bytes gauge +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="postgres"} 1.0212143e+07 +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="template0"} 7.631663e+06 +pg_database_size_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",datname="template1"} 7.631663e+06 +# HELP pg_stat_database_conflicts_confl_deadlock_total Queries cancelled due to deadlocks +# TYPE pg_stat_database_conflicts_confl_deadlock_total counter +pg_stat_database_conflicts_confl_deadlock_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 10 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.710976e+06 +# HELP pg_scrape_collector_success postgres_exporter: Whether a collector succeeded. +# TYPE pg_scrape_collector_success gauge +pg_scrape_collector_success{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",collector="database"} 1 +# HELP pg_stat_database_conflicts_confl_bufferpin_total Queries cancelled due to pinned buffers +# TYPE pg_stat_database_conflicts_confl_bufferpin_total counter +pg_stat_database_conflicts_confl_bufferpin_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_tup_fetched_total Rows fetched by queries +# TYPE pg_stat_database_tup_fetched_total counter +pg_stat_database_tup_fetched_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 253932 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.8446744073709552e+19 +# HELP pg_stat_database_temp_bytes_total Temp data written by queries +# TYPE pg_stat_database_temp_bytes_total counter +pg_stat_database_temp_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_bgwriter_buffers_checkpoint_total Buffers written during checkpoints +# TYPE pg_stat_bgwriter_buffers_checkpoint_total counter +pg_stat_bgwriter_buffers_checkpoint_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 4888 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 524288 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. 
+# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4.21888e+06 +# HELP pg_stat_database_deadlocks_total Deadlocks detected +# TYPE pg_stat_database_deadlocks_total counter +pg_stat_database_deadlocks_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_replication_replay_lag Max replay lag +# TYPE pg_stat_replication_replay_lag gauge +pg_stat_replication_replay_lag{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP pg_stat_statements_total_queries Number of times executed +# TYPE pg_stat_statements_total_queries counter +pg_stat_statements_total_queries{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 482 +# HELP realtime_postgres_changes_client_subscriptions Client subscriptions listening for Postgres changes +# TYPE realtime_postgres_changes_client_subscriptions gauge +realtime_postgres_changes_client_subscriptions{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP supabase_usage_metrics_user_queries_total The total number of user queries executed +# TYPE supabase_usage_metrics_user_queries_total counter +supabase_usage_metrics_user_queries_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 358 +# HELP pg_stat_database_tup_returned_total Rows returned by queries +# TYPE pg_stat_database_tup_returned_total counter +pg_stat_database_tup_returned_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 715303 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 4.194304e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 458752 +# HELP pg_database_size_mb Disk space used by the database +# TYPE pg_database_size_mb gauge +pg_database_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 24.295300483703613 +# HELP pg_stat_database_num_backends The number of active backends +# TYPE pg_stat_database_num_backends gauge +pg_stat_database_num_backends{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 6 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 9.146368e+06 +# HELP pg_stat_replication_send_lag Max send lag +# TYPE pg_stat_replication_send_lag gauge +pg_stat_replication_send_lag{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} NaN +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 458752 +# HELP pg_exporter_last_scrape_duration_seconds Duration of the last scrape of metrics from PostgreSQL. 
+# TYPE pg_exporter_last_scrape_duration_seconds gauge +pg_exporter_last_scrape_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0.004154578 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 7.929856e+06 +# HELP pg_stat_database_conflicts_confl_snapshot_total Queries cancelled due to old snapshots +# TYPE pg_stat_database_conflicts_confl_snapshot_total counter +pg_stat_database_conflicts_confl_snapshot_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_tup_inserted_total Rows inserted +# TYPE pg_stat_database_tup_inserted_total counter +pg_stat_database_tup_inserted_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 22231 +# HELP pg_stat_database_tup_updated_total Rows updated +# TYPE pg_stat_database_tup_updated_total counter +pg_stat_database_tup_updated_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1959 +# HELP pg_up Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no). +# TYPE pg_up gauge +pg_up{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1 +# HELP pg_wal_size_mb Disk space used by WAL files +# TYPE pg_wal_size_mb gauge +pg_wal_size_mb{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 80 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 2.412352e+06 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 3.940352e+06 +# HELP pg_settings_default_transaction_read_only Default transaction mode set to read only +# TYPE pg_settings_default_transaction_read_only gauge +pg_settings_default_transaction_read_only{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_conflicts_total Queries canceled due to conflicts with recovery +# TYPE pg_stat_database_conflicts_total counter +pg_stat_database_conflicts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. 
+# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 1.6002264e+07 +# HELP pg_stat_bgwriter_buffers_backend_total Buffers written directly by a backend +# TYPE pg_stat_bgwriter_buffers_backend_total counter +pg_stat_bgwriter_buffers_backend_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 9360 +# HELP replication_slots_max_lag_bytes Max Replication Lag +# TYPE replication_slots_max_lag_bytes gauge +replication_slots_max_lag_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 1000 +# HELP pg_stat_activity_xact_runtime Transaction Runtime +# TYPE pg_stat_activity_xact_runtime gauge +pg_stat_activity_xact_runtime{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 0 +# HELP pg_stat_database_blks_hit_total Disk blocks found in buffer cache +# TYPE pg_stat_database_blks_hit_total counter +pg_stat_database_blks_hit_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql",server="localhost:5432"} 490933 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgresql"} 0 +# HELP pgrst_schema_cache_query_time_seconds The query time in seconds of the last schema cache load +# TYPE pgrst_schema_cache_query_time_seconds gauge +pgrst_schema_cache_query_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0.000981169 +# HELP pgrst_schema_cache_loads_total The total number of times the schema cache was loaded +# TYPE pgrst_schema_cache_loads_total counter +pgrst_schema_cache_loads_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest",status="FAIL"} 1 +pgrst_schema_cache_loads_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest",status="SUCCESS"} 6 +# HELP pgrst_db_pool_max Max pool connections +# TYPE pgrst_db_pool_max gauge +pgrst_db_pool_max{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 10 +# HELP pgrst_db_pool_waiting Requests waiting to acquire a pool connection +# TYPE pgrst_db_pool_waiting gauge +pgrst_db_pool_waiting{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0 +# HELP pgrst_db_pool_available Available connections in the pool +# TYPE pgrst_db_pool_available gauge +pgrst_db_pool_available{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} -3 +# HELP pgrst_db_pool_timeouts_total The total number of pool connection timeouts +# TYPE pgrst_db_pool_timeouts_total counter +pgrst_db_pool_timeouts_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="postgrest"} 0 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 81600 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. 
+# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 622592 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 10 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1 +# HELP db_sql_connection_max_open Maximum number of open connections to the database +# TYPE db_sql_connection_max_open gauge +db_sql_connection_max_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 10 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 41 +# HELP process_runtime_go_mem_live_objects Number of live objects is the number of cumulative Mallocs - Frees +# TYPE process_runtime_go_mem_live_objects gauge +process_runtime_go_mem_live_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 10167 +# HELP db_sql_connection_closed_max_lifetime_total The total number of connections closed due to SetConnMaxLifetime +# TYPE db_sql_connection_closed_max_lifetime_total counter +db_sql_connection_closed_max_lifetime_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP process_runtime_go_mem_heap_sys_bytes Bytes of heap memory obtained from the OS +# TYPE process_runtime_go_mem_heap_sys_bytes gauge +process_runtime_go_mem_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 7.798784e+06 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.8446744073709552e+19 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0.13 +# HELP process_runtime_go_goroutines Number of goroutines that currently exist +# TYPE process_runtime_go_goroutines gauge +process_runtime_go_goroutines{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 40 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. 
+# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="200"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="500"} 0 +promhttp_metric_handler_requests_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="503"} 0 +# HELP http_status_codes_total Number of returned HTTP status codes +# TYPE http_status_codes_total counter +http_status_codes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",code="200",http_route="/health",otel_scope_name="gotrue",otel_scope_version=""} 1 +# HELP process_runtime_go_mem_heap_inuse_bytes Bytes in in-use spans +# TYPE process_runtime_go_mem_heap_inuse_bytes gauge +process_runtime_go_mem_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 3.4816e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 9763 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 70880 +# HELP http_server_response_size_bytes_total Measures the size of HTTP response messages. +# TYPE http_server_response_size_bytes_total counter +http_server_response_size_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 107 +# HELP process_runtime_go_mem_lookups_total Number of pointer lookups performed by the runtime +# TYPE process_runtime_go_mem_lookups_total counter +process_runtime_go_mem_lookups_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 0 +# HELP db_sql_connection_closed_max_idle_time_total The total number of connections closed due to SetConnMaxIdleTime +# TYPE db_sql_connection_closed_max_idle_time_total counter +db_sql_connection_closed_max_idle_time_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP db_sql_connection_open The number of established connections both in use and idle +# TYPE db_sql_connection_open gauge +db_sql_connection_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0",status="idle"} 0 +db_sql_connection_open{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0",status="inuse"} 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0"} 4.5579e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.25"} 5.4385e-05 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.5"} 0.000106201 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="0.75"} 0.001139404 +go_gc_duration_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",quantile="1"} 0.008502312 +go_gc_duration_seconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 0.019999787 +go_gc_duration_seconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 20 +# HELP target_info Target metadata +# TYPE target_info gauge +target_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",service_name="unknown_service:auth",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="1.26.0"} 1 +# HELP http_server_request_size_bytes_total Measures the size of HTTP request messages. +# TYPE http_server_request_size_bytes_total counter +http_server_request_size_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 0 +# HELP process_runtime_go_gc_pause_total_ns_total Cumulative nanoseconds in GC stop-the-world pauses since the program started +# TYPE process_runtime_go_gc_pause_total_ns_total counter +process_runtime_go_gc_pause_total_ns_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.9999787e+07 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",version="go1.23.3"} 1 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.7332412644466076e+09 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 7.766016e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 622592 +# HELP process_runtime_go_mem_heap_released_bytes Bytes of idle spans whose physical memory has been returned to the OS +# TYPE process_runtime_go_mem_heap_released_bytes gauge +process_runtime_go_mem_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 4.13696e+06 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.839616e+06 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.13696e+06 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.28167936e+09 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.194304e+06 +# HELP process_runtime_go_cgo_calls Number of cgo calls made by the current process +# TYPE process_runtime_go_cgo_calls gauge +process_runtime_go_cgo_calls{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 0 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.73323901352e+09 +# HELP runtime_uptime_milliseconds_total Milliseconds since application was initialized +# TYPE runtime_uptime_milliseconds_total counter +runtime_uptime_milliseconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 2.250715e+06 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 8 +# HELP http_server_duration_milliseconds Measures the duration of inbound HTTP requests. 
+# TYPE http_server_duration_milliseconds histogram +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="0"} 0 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="5"} 0 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="10"} 0 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="25"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="50"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="75"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="100"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="250"} 1 +http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="500"} 1 
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="750"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="1000"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="2500"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="5000"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="7500"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="10000"} 1
+http_server_duration_milliseconds_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0",le="+Inf"} 1
+http_server_duration_milliseconds_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 18.807627
+http_server_duration_milliseconds_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",http_method="GET",http_scheme="http",http_status_code="200",net_host_name="xdgggqomhgiilfjtepts.supabase.co",net_protocol_name="http",net_protocol_version="1.1",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 1
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.0022912e+07
+# HELP db_sql_connection_wait_duration_milliseconds_total The total time blocked waiting for a new connection
+# TYPE db_sql_connection_wait_duration_milliseconds_total counter
+db_sql_connection_wait_duration_milliseconds_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0
+# HELP db_sql_connection_wait_total The total number of connections waited for
+# TYPE db_sql_connection_wait_total counter
+db_sql_connection_wait_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0
+# HELP gotrue_running Whether GoTrue is running (always 1)
+# TYPE gotrue_running gauge
+gotrue_running{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="gotrue",otel_scope_version=""} 1
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 524288
+# HELP process_runtime_go_mem_heap_alloc_bytes Bytes of allocated heap objects
+# TYPE process_runtime_go_mem_heap_alloc_bytes gauge
+process_runtime_go_mem_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.919776e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 9.748968e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 3.4816e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 4.284416e+06
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 2400 +# HELP process_runtime_go_gc_pause_ns Amount of nanoseconds in GC stop-the-world pauses +# TYPE process_runtime_go_gc_pause_ns histogram +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="0"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="5"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="10"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="25"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="50"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="75"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="100"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="250"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="500"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="750"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="1000"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="2500"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="5000"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="7500"} 0 +process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="10000"} 0 
+process_runtime_go_gc_pause_ns_bucket{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0",le="+Inf"} 20 +process_runtime_go_gc_pause_ns_sum{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1.9999787e+07 +process_runtime_go_gc_pause_ns_count{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 20 +# HELP process_runtime_go_mem_heap_idle_bytes Bytes in idle (unused) spans +# TYPE process_runtime_go_mem_heap_idle_bytes gauge +process_runtime_go_mem_heap_idle_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 4.317184e+06 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.452855e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 34970 +# HELP process_runtime_go_mem_heap_objects Number of allocated heap objects +# TYPE process_runtime_go_mem_heap_objects gauge +process_runtime_go_mem_heap_objects{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 10167 +# HELP otel_scope_info Instrumentation Scope metadata +# TYPE otel_scope_info gauge +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",otel_scope_version="0.51.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 1 +otel_scope_info{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="gotrue",otel_scope_version=""} 1 +# HELP process_runtime_go_gc_count_total Number of completed garbage collection cycles +# TYPE process_runtime_go_gc_count_total counter +process_runtime_go_gc_count_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="go.opentelemetry.io/contrib/instrumentation/runtime",otel_scope_version="0.45.0"} 20 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 670129 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. 
+# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.3718792e+07 +# HELP db_sql_connection_closed_max_idle_total The total number of connections closed due to SetMaxIdleConns +# TYPE db_sql_connection_closed_max_idle_total counter +db_sql_connection_closed_max_idle_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue",otel_scope_name="github.com/XSAM/otelsql",otel_scope_version="0.26.0"} 0 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 1.839616e+06 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 15600 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 3.11e+06 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total{supabase_project_ref="xdgggqomhgiilfjtepts",service_type="gotrue"} 44733 \ No newline at end of file diff --git a/supabase/tests/fixtures/storage_api_metrics.txt b/supabase/tests/fixtures/storage_api_metrics.txt new file mode 100644 index 0000000000000..171cba34e6309 --- /dev/null +++ b/supabase/tests/fixtures/storage_api_metrics.txt @@ -0,0 +1,274 @@ + +# HELP storage_api_upload_started Upload started +# TYPE storage_api_upload_started gauge + +# HELP storage_api_upload_success Successful uploads +# TYPE storage_api_upload_success gauge + +# HELP storage_api_database_query_performance Database query performance +# TYPE storage_api_database_query_performance histogram +storage_api_database_query_performance_bucket{le="0.005",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.01",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.025",name="ListBuckets"} 0 +storage_api_database_query_performance_bucket{le="0.05",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.1",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.25",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="0.5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="1",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="2.5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="5",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="10",name="ListBuckets"} 1 +storage_api_database_query_performance_bucket{le="+Inf",name="ListBuckets"} 1 +storage_api_database_query_performance_sum{name="ListBuckets"} 0.031662833 +storage_api_database_query_performance_count{name="ListBuckets"} 1 + +# HELP storage_api_queue_job_scheduled_time Time taken to schedule a job in the queue +# TYPE storage_api_queue_job_scheduled_time histogram + +# HELP storage_api_queue_job_scheduled Current number of pending messages in the queue +# TYPE storage_api_queue_job_scheduled gauge + +# HELP storage_api_queue_job_completed Current number of processed messages in the queue +# TYPE storage_api_queue_job_completed gauge + +# HELP 
storage_api_queue_job_retry_failed Current number of failed attempts messages in the queue +# TYPE storage_api_queue_job_retry_failed gauge + +# HELP storage_api_queue_job_error Current number of errored messages in the queue +# TYPE storage_api_queue_job_error gauge + +# HELP storage_api_s3_upload_part S3 upload part performance +# TYPE storage_api_s3_upload_part histogram + +# HELP storage_api_db_pool Number of database pools created +# TYPE storage_api_db_pool gauge +storage_api_db_pool{is_external="false"} 1 + +# HELP storage_api_db_connections Number of database connections +# TYPE storage_api_db_connections gauge +storage_api_db_connections{is_external="false"} 0 + +# HELP storage_api_http_pool_busy_sockets Number of busy sockets currently in use +# TYPE storage_api_http_pool_busy_sockets gauge +storage_api_http_pool_busy_sockets{name="s3_tus",region="stub",protocol="https"} 0 + +# HELP storage_api_http_pool_free_sockets Number of free sockets available for reuse +# TYPE storage_api_http_pool_free_sockets gauge +storage_api_http_pool_free_sockets{name="s3_tus",region="stub",protocol="https"} 0 + +# HELP storage_api_http_pool_requests Number of pending requests waiting for a socket +# TYPE storage_api_http_pool_requests gauge +storage_api_http_pool_requests{name="s3_tus",region="stub"} 0 + +# HELP storage_api_http_pool_errors Number of pending requests waiting for a socket +# TYPE storage_api_http_pool_errors gauge +storage_api_http_pool_errors{name="s3_tus",region="stub",type="socket_error",protocol="https"} 0 +storage_api_http_pool_errors{name="s3_tus",region="stub",type="timeout_socket_error",protocol="https"} 0 +storage_api_http_pool_errors{name="s3_tus",region="stub",type="create_socket_error",protocol="https"} 0 + +# HELP storage_api_http_request_duration_seconds request duration in seconds +# TYPE storage_api_http_request_duration_seconds histogram +storage_api_http_request_duration_seconds_bucket{le="0.005",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.01",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.025",method="GET",route="/bucket",status_code="2xx"} 0 +storage_api_http_request_duration_seconds_bucket{le="0.05",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.1",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.25",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="0.5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="1",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="2.5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="5",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="10",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_bucket{le="+Inf",method="GET",route="/bucket",status_code="2xx"} 1 +storage_api_http_request_duration_seconds_sum{method="GET",route="/bucket",status_code="2xx"} 0.043428125 +storage_api_http_request_duration_seconds_count{method="GET",route="/bucket",status_code="2xx"} 1 + +# HELP storage_api_http_request_summary_seconds request duration in seconds summary +# TYPE storage_api_http_request_summary_seconds summary 
+storage_api_http_request_summary_seconds{quantile="0.01",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.05",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.5",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.9",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.95",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.99",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds{quantile="0.999",method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds_sum{method="GET",route="/bucket",status_code="2xx"} 0.042737459 +storage_api_http_request_summary_seconds_count{method="GET",route="/bucket",status_code="2xx"} 1 + +# HELP storage_api_process_cpu_user_seconds_total Total user CPU time spent in seconds. +# TYPE storage_api_process_cpu_user_seconds_total counter +storage_api_process_cpu_user_seconds_total{region="stub"} 361.14234300000004 + +# HELP storage_api_process_cpu_system_seconds_total Total system CPU time spent in seconds. +# TYPE storage_api_process_cpu_system_seconds_total counter +storage_api_process_cpu_system_seconds_total{region="stub"} 143.664084 + +# HELP storage_api_process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE storage_api_process_cpu_seconds_total counter +storage_api_process_cpu_seconds_total{region="stub"} 504.80642700000004 + +# HELP storage_api_process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE storage_api_process_start_time_seconds gauge +storage_api_process_start_time_seconds{region="stub"} 1733450910 + +# HELP storage_api_process_resident_memory_bytes Resident memory size in bytes. +# TYPE storage_api_process_resident_memory_bytes gauge +storage_api_process_resident_memory_bytes{region="stub"} 103641088 + +# HELP storage_api_process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE storage_api_process_virtual_memory_bytes gauge +storage_api_process_virtual_memory_bytes{region="stub"} 4783030272 + +# HELP storage_api_process_heap_bytes Process heap size in bytes. +# TYPE storage_api_process_heap_bytes gauge +storage_api_process_heap_bytes{region="stub"} 132231168 + +# HELP storage_api_process_open_fds Number of open file descriptors. +# TYPE storage_api_process_open_fds gauge +storage_api_process_open_fds{region="stub"} 21 + +# HELP storage_api_process_max_fds Maximum number of open file descriptors. +# TYPE storage_api_process_max_fds gauge +storage_api_process_max_fds{region="stub"} 1048576 + +# HELP storage_api_nodejs_eventloop_lag_seconds Lag of event loop in seconds. +# TYPE storage_api_nodejs_eventloop_lag_seconds gauge +storage_api_nodejs_eventloop_lag_seconds{region="stub"} 0.0089925 + +# HELP storage_api_nodejs_eventloop_lag_min_seconds The minimum recorded event loop delay. +# TYPE storage_api_nodejs_eventloop_lag_min_seconds gauge +storage_api_nodejs_eventloop_lag_min_seconds{region="stub"} 0.000014848 + +# HELP storage_api_nodejs_eventloop_lag_max_seconds The maximum recorded event loop delay. 
+# TYPE storage_api_nodejs_eventloop_lag_max_seconds gauge +storage_api_nodejs_eventloop_lag_max_seconds{region="stub"} 1.198522367 + +# HELP storage_api_nodejs_eventloop_lag_mean_seconds The mean of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_mean_seconds gauge +storage_api_nodejs_eventloop_lag_mean_seconds{region="stub"} 0.011911191714967564 + +# HELP storage_api_nodejs_eventloop_lag_stddev_seconds The standard deviation of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_stddev_seconds gauge +storage_api_nodejs_eventloop_lag_stddev_seconds{region="stub"} 0.0035951748338251626 + +# HELP storage_api_nodejs_eventloop_lag_p50_seconds The 50th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p50_seconds gauge +storage_api_nodejs_eventloop_lag_p50_seconds{region="stub"} 0.011395071 + +# HELP storage_api_nodejs_eventloop_lag_p90_seconds The 90th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p90_seconds gauge +storage_api_nodejs_eventloop_lag_p90_seconds{region="stub"} 0.014335999 + +# HELP storage_api_nodejs_eventloop_lag_p99_seconds The 99th percentile of the recorded event loop delays. +# TYPE storage_api_nodejs_eventloop_lag_p99_seconds gauge +storage_api_nodejs_eventloop_lag_p99_seconds{region="stub"} 0.017448959 + +# HELP storage_api_nodejs_active_resources Number of active resources that are currently keeping the event loop alive, grouped by async resource type. +# TYPE storage_api_nodejs_active_resources gauge +storage_api_nodejs_active_resources{type="PipeWrap",region="stub"} 2 +storage_api_nodejs_active_resources{type="TCPSocketWrap",region="stub"} 2 +storage_api_nodejs_active_resources{type="TCPServerWrap",region="stub"} 1 +storage_api_nodejs_active_resources{type="Timeout",region="stub"} 2 +storage_api_nodejs_active_resources{type="Immediate",region="stub"} 1 + +# HELP storage_api_nodejs_active_resources_total Total number of active resources. +# TYPE storage_api_nodejs_active_resources_total gauge +storage_api_nodejs_active_resources_total{region="stub"} 8 + +# HELP storage_api_nodejs_active_handles Number of active libuv handles grouped by handle type. Every handle type is C++ class name. +# TYPE storage_api_nodejs_active_handles gauge +storage_api_nodejs_active_handles{type="Socket",region="stub"} 4 +storage_api_nodejs_active_handles{type="Server",region="stub"} 1 + +# HELP storage_api_nodejs_active_handles_total Total number of active handles. +# TYPE storage_api_nodejs_active_handles_total gauge +storage_api_nodejs_active_handles_total{region="stub"} 5 + +# HELP storage_api_nodejs_active_requests Number of active libuv requests grouped by request type. Every request type is C++ class name. +# TYPE storage_api_nodejs_active_requests gauge + +# HELP storage_api_nodejs_active_requests_total Total number of active requests. +# TYPE storage_api_nodejs_active_requests_total gauge +storage_api_nodejs_active_requests_total{region="stub"} 0 + +# HELP storage_api_nodejs_heap_size_total_bytes Process heap size from Node.js in bytes. +# TYPE storage_api_nodejs_heap_size_total_bytes gauge +storage_api_nodejs_heap_size_total_bytes{region="stub"} 51707904 + +# HELP storage_api_nodejs_heap_size_used_bytes Process heap size used from Node.js in bytes. +# TYPE storage_api_nodejs_heap_size_used_bytes gauge +storage_api_nodejs_heap_size_used_bytes{region="stub"} 43002696 + +# HELP storage_api_nodejs_external_memory_bytes Node.js external memory size in bytes. 
+# TYPE storage_api_nodejs_external_memory_bytes gauge +storage_api_nodejs_external_memory_bytes{region="stub"} 3568105 + +# HELP storage_api_nodejs_heap_space_size_total_bytes Process heap space size total from Node.js in bytes. +# TYPE storage_api_nodejs_heap_space_size_total_bytes gauge +storage_api_nodejs_heap_space_size_total_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="new",region="stub"} 1048576 +storage_api_nodejs_heap_space_size_total_bytes{space="old",region="stub"} 44597248 +storage_api_nodejs_heap_space_size_total_bytes{space="code",region="stub"} 3670016 +storage_api_nodejs_heap_space_size_total_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="new_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_total_bytes{space="large_object",region="stub"} 2220032 +storage_api_nodejs_heap_space_size_total_bytes{space="code_large_object",region="stub"} 172032 +storage_api_nodejs_heap_space_size_total_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_heap_space_size_used_bytes Process heap space size used from Node.js in bytes. +# TYPE storage_api_nodejs_heap_space_size_used_bytes gauge +storage_api_nodejs_heap_space_size_used_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="new",region="stub"} 310136 +storage_api_nodejs_heap_space_size_used_bytes{space="old",region="stub"} 37172576 +storage_api_nodejs_heap_space_size_used_bytes{space="code",region="stub"} 3194320 +storage_api_nodejs_heap_space_size_used_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="new_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_used_bytes{space="large_object",region="stub"} 2186264 +storage_api_nodejs_heap_space_size_used_bytes{space="code_large_object",region="stub"} 155296 +storage_api_nodejs_heap_space_size_used_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_heap_space_size_available_bytes Process heap space size available from Node.js in bytes. +# TYPE storage_api_nodejs_heap_space_size_available_bytes gauge +storage_api_nodejs_heap_space_size_available_bytes{space="read_only",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="new",region="stub"} 720744 +storage_api_nodejs_heap_space_size_available_bytes{space="old",region="stub"} 6558624 +storage_api_nodejs_heap_space_size_available_bytes{space="code",region="stub"} 246096 +storage_api_nodejs_heap_space_size_available_bytes{space="shared",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="new_large_object",region="stub"} 1048576 +storage_api_nodejs_heap_space_size_available_bytes{space="large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="code_large_object",region="stub"} 0 +storage_api_nodejs_heap_space_size_available_bytes{space="shared_large_object",region="stub"} 0 + +# HELP storage_api_nodejs_version_info Node.js version info. +# TYPE storage_api_nodejs_version_info gauge +storage_api_nodejs_version_info{version="v20.18.0",major="20",minor="18",patch="0",region="stub"} 1 + +# HELP storage_api_nodejs_gc_duration_seconds Garbage collection duration by kind, one of major, minor, incremental or weakcb. 
+# TYPE storage_api_nodejs_gc_duration_seconds histogram +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="minor",region="stub"} 544 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="minor",region="stub"} 1002 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_sum{kind="minor",region="stub"} 1.4403644915223157 +storage_api_nodejs_gc_duration_seconds_count{kind="minor",region="stub"} 1006 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="incremental",region="stub"} 4 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="incremental",region="stub"} 8 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_sum{kind="incremental",region="stub"} 0.07946879202127458 +storage_api_nodejs_gc_duration_seconds_count{kind="incremental",region="stub"} 12 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.001",kind="major",region="stub"} 0 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.01",kind="major",region="stub"} 10 +storage_api_nodejs_gc_duration_seconds_bucket{le="0.1",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="1",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="2",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="5",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_bucket{le="+Inf",kind="major",region="stub"} 11 +storage_api_nodejs_gc_duration_seconds_sum{kind="major",region="stub"} 0.04609945893287658 +storage_api_nodejs_gc_duration_seconds_count{kind="major",region="stub"} 11 \ No newline at end of file diff --git a/supabase/tests/test_e2e.py b/supabase/tests/test_e2e.py new file mode 100644 index 0000000000000..3357d5a6d148f --- /dev/null +++ b/supabase/tests/test_e2e.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import assert_service_checks + + +def test_e2e_openmetrics_v2(dd_agent_check): + aggregator = dd_agent_check() + + aggregator.assert_service_check('supabase.openmetrics.health', ServiceCheck.OK, count=1) + aggregator.assert_service_check('supabase.storage_api.openmetrics.health', ServiceCheck.OK, count=1) + assert_service_checks(aggregator) diff --git a/supabase/tests/test_unit.py b/supabase/tests/test_unit.py new file mode 100644 index 0000000000000..774eec9646831 --- /dev/null +++ b/supabase/tests/test_unit.py @@ -0,0 +1,50 @@ +# (C) Datadog, Inc. 
2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+import pytest
+
+from datadog_checks.base.constants import ServiceCheck
+from datadog_checks.dev.utils import get_metadata_metrics
+from datadog_checks.supabase import SupabaseCheck
+
+from .common import (
+    PRIVILEGED_METRICS,
+    PRIVILEGED_METRICS_INSTANCE,
+    PRIVILEGED_METRICS_NAMESPACE,
+    STORAGE_API_INSTANCE,
+    STORAGE_API_METRICS,
+    STORAGE_API_METRICS_NAMESPACE,
+    get_fixture_path,
+)
+
+
+@pytest.mark.parametrize(
+    'namespace, instance, metrics, fixture_name',
+    [
+        (PRIVILEGED_METRICS_NAMESPACE, PRIVILEGED_METRICS_INSTANCE, PRIVILEGED_METRICS, 'privileged_metrics.txt'),
+        (STORAGE_API_METRICS_NAMESPACE, STORAGE_API_INSTANCE, STORAGE_API_METRICS, 'storage_api_metrics.txt'),
+    ],
+)
+def test_check_mock_supabase_openmetrics(
+    dd_run_check, instance, aggregator, fixture_name, metrics, mock_http_response, namespace
+):
+    mock_http_response(file_path=get_fixture_path(fixture_name))
+    check = SupabaseCheck('supabase', {}, [instance])
+    dd_run_check(check)
+
+    for metric in metrics:
+        aggregator.assert_metric(metric)
+        aggregator.assert_metric_has_tag(metric, 'test:test')
+
+    aggregator.assert_all_metrics_covered()
+    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
+    aggregator.assert_service_check(f'{namespace}.openmetrics.health', ServiceCheck.OK)
+
+
+def test_empty_instance(dd_run_check):
+    with pytest.raises(
+        Exception,
+        match='Must specify at least one of the following:`privileged_metrics_endpoint` or `storage_api_endpoint`.',
+    ):
+        check = SupabaseCheck('supabase', {}, [{}])
+        dd_run_check(check)
diff --git a/tls/tests/conftest.py b/tls/tests/conftest.py
index 12483f28b2dab..b606a64d0770e 100644
--- a/tls/tests/conftest.py
+++ b/tls/tests/conftest.py
@@ -29,13 +29,28 @@
 }


-@pytest.fixture(scope='session', autouse=True)
+@pytest.fixture(scope='function')
+def clean_fips_environment():
+    os.environ["GOFIPS"] = "0"
+    os.environ["OPENSSL_CONF"] = ""
+    os.environ["OPENSSL_MODULES"] = ""
+    yield
+
+
+@pytest.fixture(scope='session')
 def dd_environment(instance_e2e, mock_local_tls_dns):
     with docker_run(os.path.join(HERE, 'compose', 'docker-compose.yml'), build=True, sleep=20):
         e2e_metadata = {'docker_volumes': ['{}:{}'.format(CA_CERT, CA_CERT_MOUNT_PATH)]}
         yield instance_e2e, e2e_metadata


+@pytest.fixture(scope='session')
+def dd_fips_environment(instance_e2e_fips, instance_e2e_non_fips, mock_local_tls_dns):
+    with docker_run(os.path.join(HERE, 'fips', 'docker-compose.yml'), build=True, sleep=20):
+        e2e_metadata = {'docker_volumes': ['{}:{}'.format(CA_CERT, CA_CERT_MOUNT_PATH)]}
+        yield instance_e2e_fips, instance_e2e_non_fips, e2e_metadata
+
+
 @pytest.fixture(scope='session')
 def mock_local_tls_dns():
     with mock_local(HOSTNAME_TO_PORT_MAPPING):
@@ -158,6 +173,28 @@ def instance_e2e():
     }


+@pytest.fixture(scope='session')
+def instance_e2e_fips():
+    return {
+        'server': 'https://localhost',
+        'port': 8443,
+        'tls_ca_cert': CA_CERT_MOUNT_PATH,
+        'tls_verify': False,
+        'tls_validate_hostname': False,
+    }
+
+
+@pytest.fixture(scope='session')
+def instance_e2e_non_fips():
+    return {
+        'server': 'https://localhost',
+        'port': 9443,
+        'tls_ca_cert': CA_CERT_MOUNT_PATH,
+        'tls_verify': False,
+        'tls_validate_hostname': False,
+    }
+
+
 @pytest.fixture
 def instance_remote_ok_ip():
     return {'server': '1.1.1.1', 'tls_validate_hostname': False, 'days_warning': 1, 'days_critical': 1}
diff --git a/tls/tests/fips/Dockerfile b/tls/tests/fips/Dockerfile
new file mode 100644
index 0000000000000..dbf827d8f9d66
--- /dev/null +++ b/tls/tests/fips/Dockerfile @@ -0,0 +1,11 @@ +FROM alpine:3.18 + +# Install OpenSSL and necessary tools +RUN apk add --no-cache openssl bash + +COPY start-server.sh /usr/local/bin/start-server.sh +COPY ca.* /tmp/ +RUN chmod +x /usr/local/bin/start-server.sh + +# Expose port 443 +EXPOSE 443 diff --git a/tls/tests/fips/ca.crt b/tls/tests/fips/ca.crt new file mode 100644 index 0000000000000..189746cb2a227 --- /dev/null +++ b/tls/tests/fips/ca.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIULsStz9lSmEN4m9GOJbAcvBD26qMwDQYJKoZIhvcNAQEL +BQAwcDELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA2ZvbzEMMAoGA1UEBwwDZm9vMQww +CgYDVQQKDANmb28xDDAKBgNVBAsMA2ZvbzEVMBMGA1UEAwwMZXhwaXJlZC5tb2Nr +MRIwEAYJKoZIhvcNAQkBFgNmb28wIBcNMjAwMzA1MTEwNDIyWhgPOTk5OTEyMzEx +MTA0MjJaMHAxCzAJBgNVBAYTAlVTMQwwCgYDVQQIDANmb28xDDAKBgNVBAcMA2Zv +bzEMMAoGA1UECgwDZm9vMQwwCgYDVQQLDANmb28xFTATBgNVBAMMDGV4cGlyZWQu +bW9jazESMBAGCSqGSIb3DQEJARYDZm9vMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA5Ws3yh++mg+fnvwp2UbW1jpO8MslpdudgPkaZOtdO40lg4teYTqA +rc770P+sfNha/0Gv1eel9LtdgsvyoHhABQfWoUYDUAU5ZbCalOHWOgZulZuw8Ff8 +zEiS+7ccBQs2ayGclIUVZo7PR3eURwmsCfd1CgJw60DQVegJhD/G4p0UhJRfArZ3 +OYvsYziE20QmoRNubntlR7gyzF30378pp8tP6661rFoZMuDx1ajvD+ExBCzUCiwV +5ly/21FdXaEOVqmjxkn6nYQX35SH7ypyVJCYLe7sSW+ExT5FrvSsVOU+0GzAO291 +b+xJQf82/wWbwXgcauzpZYidkhmJahlNKUSGvHQBIvzSbSe198PFWug99Evs/Ux6 +xacyPCxdZ90xYVqah1zoeCsJgi+xa+4zRSk+OreP+TOe6Ph78Ne9+rn/R6eTKzPY +JTNWgi6nt5os8rKijTMpTHx24BjekFOTtIfMC9geNGYWnamTfEplMq7M86d6kgiq +UOeLdXywVvoyMkbVg7oAMU2wbwycu3HTgP4P41esWjNp1Z36YUzRVJoavcZtEJtO +3ii1Elz/SFlqktLDwl27kh4GzvGEXwgFYjW+ucY4FrinTYmmZBCHQwuD3ZPPFVAu +n1XEdYJf/suIqInH7eN2x/0RTsJbVodtMmke+XO5DdynUWEuK+/bmjMCAwEAAaNT +MFEwHQYDVR0OBBYEFBEJJjHmDC+yd7Dem4SQg25HJ/mIMB8GA1UdIwQYMBaAFBEJ +JjHmDC+yd7Dem4SQg25HJ/mIMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggIBAGfXMDCJ3IpnLl3CF6zDkV8Jvv8MPx/61iQ5nNikuMEkP7smImfGx7bh +WJhPiOQPaWU67Cr29WQ0bt0lKneBAcfvqLQ2Ub6wV4QCldhc95MfOo1ReqqDQkA+ +mi9ILwlaDevYEpBlpqa6v3L/EtaDof/Q8dpGm0jX+8yKqAZShpRVWRZUZch9iJOF +XDS2Kee8wFP04jtt3isgXu6j+0JMytd834CoBHCGdjQsNFBnfTCKogg7iSweUoEa +jnfYfRzxAzFachM/kB3CSpDifKtQabagKNfkEw2gyJDdaj3q3LO7jFCwySu6K/7n +MeJSA6PJNSXouImUweNUjG+NJKFcXLHMW7s4lqbfnnaQVGq+n3nofhm11uasAOF1 +HdQRmyJQPKcZKTN19fz6ageJqJI6hCol2OENuTcmVH++KlJAK4R4z/S9/Er/9yOn +UIOvwuBbcfS9uxoRlUge8jXWCVTFzolq6DVGvCOwgLlNsQDT4oEsh7unAXE7E2w9 +blkFskCImPaUpKqXwdLXPtESyJK6ri5nC5qY72cE3MndQ/HsrdbpjyvliYBXxcAP +HxBgKj1HzUdME4OIHd7tIlndsBoaGnAwGdR018EkxaYZj4OhdbVSUwZop+NvtA8q +MutrU4WG95lqaWnhnt6UnL/rbf3zbxzP9xyJIp3NYW8p7juYUeik +-----END CERTIFICATE----- diff --git a/tls/tests/fips/ca.key b/tls/tests/fips/ca.key new file mode 100644 index 0000000000000..ab3351d26fb6a --- /dev/null +++ b/tls/tests/fips/ca.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEA5Ws3yh++mg+fnvwp2UbW1jpO8MslpdudgPkaZOtdO40lg4te +YTqArc770P+sfNha/0Gv1eel9LtdgsvyoHhABQfWoUYDUAU5ZbCalOHWOgZulZuw +8Ff8zEiS+7ccBQs2ayGclIUVZo7PR3eURwmsCfd1CgJw60DQVegJhD/G4p0UhJRf +ArZ3OYvsYziE20QmoRNubntlR7gyzF30378pp8tP6661rFoZMuDx1ajvD+ExBCzU +CiwV5ly/21FdXaEOVqmjxkn6nYQX35SH7ypyVJCYLe7sSW+ExT5FrvSsVOU+0GzA +O291b+xJQf82/wWbwXgcauzpZYidkhmJahlNKUSGvHQBIvzSbSe198PFWug99Evs +/Ux6xacyPCxdZ90xYVqah1zoeCsJgi+xa+4zRSk+OreP+TOe6Ph78Ne9+rn/R6eT +KzPYJTNWgi6nt5os8rKijTMpTHx24BjekFOTtIfMC9geNGYWnamTfEplMq7M86d6 +kgiqUOeLdXywVvoyMkbVg7oAMU2wbwycu3HTgP4P41esWjNp1Z36YUzRVJoavcZt +EJtO3ii1Elz/SFlqktLDwl27kh4GzvGEXwgFYjW+ucY4FrinTYmmZBCHQwuD3ZPP +FVAun1XEdYJf/suIqInH7eN2x/0RTsJbVodtMmke+XO5DdynUWEuK+/bmjMCAwEA 
+AQKCAgBiqhbHNZnKNffm7vmsePvCDDeQ9T0OtIFrSzZfup1RFCXTCeggFoHqvf6s +Zpiu5HlWF9DgRIyp0L6plr6U5sJZp4JVv3+DNYv2wNnqN6njMIr0io2w/5Y34Opd +mOVTAfx4XG8zrHyEq9xXFwVM+8riSmsqT9xyQGCY4X7eQnNtWDLPxeOKjiB+Pj/+ +d/sjNY8LbQjsOONY0c3++FVsuJDYmeaYLH3zTphRuk4nHk0Z3jYvXnwel9EfTjow +vzBRKQe6m6BUkdJXVczSmdVQNUgQOFNKRduWxYArN9nOIWnP5Pt7tZmCti+iX9Mt +r+3eBXJz5Q0LqzHBOdzSTWs8lv8IdLNIrzxuiWlBqlBNrnIkyzzvcT0XZzk3ZILT +MTW+j0HCEsaVOdG7T1l664BMHC+9EEbUnwdqZoQFntu9ja8+xBFOzmbAcCCxQhJT +W7sF0WKtwOaByoJK07XJT97JQD0nd1LdQ0bkuiN9JFjSPdKaBW3NpzYiwfC5pv0c +L8jQbLwNYnjywlabHks0Pg3daXRutefydr42dhWe8/5108If/OyFRLVDfepeRP7J +K4lH05IZeZWvh/DifW0e0hepl0AGeN9s8KR4PcOgKd5r2EOBhK9FguEewL5Mr2Ab +xlmsb06q2l2t5IDeQlIa2ElHxeWTGGsrUDt2/PfZMqmIo0a08QKCAQEA/3/ORQqI +1rlSIgSxJKfKrgmnAJYEJVAWTpiIdbeEKWn8Z7PXHQNB15b4e8snm8W4yG3IJcFT +t4ZtJ8Hbwwc8xYdLmbiTO1l8+fCWgT2JUG4+XtaKeG0/cTyQJebRb4QN8BpH8A02 +zoE71Wa7GV6coUZYggPNYj0foWezG+TQ6+MIF7zILA+3iALRt7MUUX4bRvlB5zEC +hHmo9gExPypaxsolkgCzVgPUsY84hFbWULNDZPp15hyHGzCXhYg5M7xmSZjHasV+ +Zziqm9Yh465i7aCmadUysJfmx08frPcW0panzGpqISNNW4NycwwbExltlgfL8W2k +J0cikpiLX3EvWQKCAQEA5d5Tm1teih7IGYnt1HXrTC6E4TX/J2WzHjIbE3BdyOcX +KJMk+ooE7sZqYEyLEq1DIkxIsuxFUPLC6Gli+LaXtEnj6iBAXiPC91xMSDN/Qgdy +mT3y3I7u885/rGlMXXMeot0A3DY67/d7wLOX3ie/wHyYk2VBuF6uicEp1SoXo8yp +T9GvPEpT/16WxYMDL+cqmdhBX3j6p3ZWOQXOLwXnKzkaxMNXAHuVRjx5jl/XzLKB +SZb4N9sc8BM97Fbev2g49ck7EJVGO/rNtriCa8dt5fK+0J0EONoqjufAIpUo40Eq +4kFGFbxspEs6MhRrl4UkrQfwbqjT9vuLHH2QY6lQawKCAQEA7KFYz7au/3eJqvn/ +ejxwyXklE7TcyBYBYu+ASs4ZeCWx3/W+wB8ZJuuq3TRNTcb0maSbXuRLfcjhZZX0 +zA7y6NqiWQ+KRMhfqDh7m9z7ROOlnj5C5r//pwd5VbENvaNnKT2d7KTAYsdsO/u2 +QwvOMsutA1U0Lc0Ac4NQbgAgwqd1Ak8UcUJpL220/9a6dbM+3h/SOqW4eCsZIiX+ +j0rR4hSscSl27q4DmiNUK7UlLn6oZ2hUMzeupa7+VmzQ1aqibX0zDB83L7BaypOq +tWz4MK6+EXykQsucsHEGnDIIECf55qeT/XbtmVkHcaf/l1PQm8hCySpLgxkt2umv +A0kXmQKCAQEAwoGZH2l2GsunKC0VKOVK4CCw4dyN9ilRImjljlJTmW8ponS4IqPt +Ppp4YtoCCCDCIOCJ15SjprZ0hLLAQ5JF1hF8IJ1/uejqzeK0zZd9xEKHDvJjcYwk +1/rQ//Pt4VlLACaf1rawsmMovUt6Y0ohMGB9vqM8tCSx7fCcVDqU3E6OqfLVI+AF +KZ5BWAnfBYbG/n0F/CJjfpZcqU2nRRTqoiFk7EAae1cXXeJIPVgOQ7B8Q+fSS3lG +CENP748mVrJ7GEdZilO1pYU40rdX7JlI/f2kxuNGMlExF9E0PE8Y1QtMnTz8b+IL +0A2zxbKPxBuuiBeitBB5o5EACnCZYCth1wKCAQEAj8wpxR7HKE51Bl6a48il1Lhj +5g6/wXa1a3lGbos4DaJ5YzGREjnxFzt9EjoAKvebsxJ+ScC/s1b0IKitfzfPu+U3 +6ukR7zL+X18QbL2WYouVBd99SZ6FF+EUFgrYNfM+3ZKTNQpUZt3K4D2l6WCzcjkO +qU8/QRdl4a4AXO4czGn3qQXiTVdr7T31vFryhAbSJhOhIEaIo41NLzr7s3vEKz4j +ir7bP7YYYx+LXD0kROa6nCUO66U/FCMHWXQvp7MrxxJVwBN6nePL7dVOSc82OaYW +Le+VcKGfLbE5Othruf12kbfWg8Pu5xU3KPA73VtrZ/BzWYbbelsPj8TtJ9T8Aw== +-----END RSA PRIVATE KEY----- diff --git a/tls/tests/fips/docker-compose.yml b/tls/tests/fips/docker-compose.yml new file mode 100644 index 0000000000000..8315ee02e0868 --- /dev/null +++ b/tls/tests/fips/docker-compose.yml @@ -0,0 +1,28 @@ +services: + fips-server: + build: . + ports: + - "8443:443" + volumes: + - ./ca.crt:/etc/ssl/certs/server.crt + - ./ca.key:/etc/ssl/private/server.key + command: ["/usr/local/bin/start-server.sh", "ECDHE-RSA-AES128-SHA256"] + healthcheck: + test: ["CMD", "curl", "-f", "https://localhost:443"] + interval: 30s + timeout: 10s + retries: 3 + + non-fips-server: + build: . 
+    ports:
+      - "9443:443"
+    volumes:
+      - ./ca.crt:/etc/ssl/certs/server.crt
+      - ./ca.key:/etc/ssl/private/server.key
+    command: ["/usr/local/bin/start-server.sh", "ECDHE-RSA-CHACHA20-POLY1305"]
+    healthcheck:
+      test: ["CMD", "curl", "-f", "https://localhost:443"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
diff --git a/tls/tests/fips/start-server.sh b/tls/tests/fips/start-server.sh
new file mode 100644
index 0000000000000..a6b40f97b5041
--- /dev/null
+++ b/tls/tests/fips/start-server.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [ ! -f /etc/ssl/certs/server.crt ] || [ ! -f /etc/ssl/private/server.key ]; then
+    echo "Generating self-signed certificate..."
+    mkdir -p /etc/ssl/private
+    openssl req -x509 -newkey rsa:2048 -keyout /etc/ssl/private/server.key -out /etc/ssl/certs/server.crt -days 365 -nodes -subj "/CN=localhost"
+fi
+
+CIPHER="$1"
+
+echo "Starting OpenSSL server on port 443 with cipher $CIPHER..."
+openssl s_server \
+    -accept 443 \
+    -cert /etc/ssl/certs/server.crt \
+    -key /etc/ssl/private/server.key \
+    -cipher "$CIPHER" \
+    -no_tls1_3 \
+    -WWW
diff --git a/tls/tests/test_e2e.py b/tls/tests/test_e2e.py
index 1d4ff7b8e29fe..8e34480828826 100644
--- a/tls/tests/test_e2e.py
+++ b/tls/tests/test_e2e.py
@@ -13,7 +13,7 @@


 @pytest.mark.e2e
-def test_e2e(dd_agent_check, instance_e2e):
+def test_e2e(dd_environment, dd_agent_check, instance_e2e):
     aggregator = dd_agent_check(instance_e2e)
     aggregator.assert_service_check(SERVICE_CHECK_CAN_CONNECT, status=TLSCheck.OK, count=1)

diff --git a/tls/tests/test_fips.py b/tls/tests/test_fips.py
new file mode 100644
index 0000000000000..75b2f84a9f724
--- /dev/null
+++ b/tls/tests/test_fips.py
@@ -0,0 +1,58 @@
+# (C) Datadog, Inc. 2024-present
+# All rights reserved
+# Licensed under a 3-clause BSD style license (see LICENSE)
+from typing import Any  # noqa: F401
+
+import pytest
+
+from datadog_checks.tls import TLSCheck
+from datadog_checks.tls.const import (
+    SERVICE_CHECK_CAN_CONNECT,
+    SERVICE_CHECK_VALIDATION,
+)
+
+
+@pytest.mark.e2e
+@pytest.mark.fips_off
+def test_connection_before_fips(clean_fips_environment, dd_fips_environment, dd_agent_check, instance_e2e_fips):
+    """
+    Connection to the FIPS server before enabling FIPS mode should succeed.
+    """
+    aggregator = dd_agent_check(instance_e2e_fips)
+    aggregator.assert_service_check(SERVICE_CHECK_CAN_CONNECT, status=TLSCheck.OK, count=1)
+    aggregator.assert_service_check(SERVICE_CHECK_VALIDATION, status=TLSCheck.OK, count=1)
+
+
+@pytest.mark.e2e
+@pytest.mark.fips_off
+def test_connection_before_non_fips(clean_fips_environment, dd_fips_environment, dd_agent_check, instance_e2e_non_fips):
+    """
+    Connection to the non-FIPS server before enabling FIPS mode should succeed.
+    """
+    aggregator = dd_agent_check(instance_e2e_non_fips)
+    aggregator.assert_service_check(SERVICE_CHECK_CAN_CONNECT, status=TLSCheck.OK, count=1)
+    aggregator.assert_service_check(SERVICE_CHECK_VALIDATION, status=TLSCheck.OK, count=1)
+
+
+@pytest.mark.e2e
+@pytest.mark.fips_on
+def test_connection_after_fips(clean_fips_environment, dd_fips_environment, dd_agent_check, instance_e2e_fips):
+    """
+    Connection to the FIPS server after enabling FIPS mode should succeed.
+ """ + aggregator = dd_agent_check(instance_e2e_fips) + aggregator.assert_service_check(SERVICE_CHECK_CAN_CONNECT, status=TLSCheck.OK, count=1) + aggregator.assert_service_check(SERVICE_CHECK_VALIDATION, status=TLSCheck.OK, count=1) + + +@pytest.mark.e2e +@pytest.mark.fips_on +def test_connection_after_non_fips(clean_fips_environment, dd_fips_environment, dd_agent_check, instance_e2e_non_fips): + """ + Connection to the non-FIPS server after enabling FIPS mode should fail. + """ + aggregator = dd_agent_check(instance_e2e_non_fips) + aggregator.assert_service_check( + SERVICE_CHECK_VALIDATION, + message="[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] ssl/tls alert handshake failure (_ssl.c:1000)", + )