diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 6a5db93053e3b..1e3b913c5cb5a 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -28,3 +28,5 @@ BWC_VERSION:
   - "2.11.1"
   - "2.11.2"
   - "2.12.0"
+  - "2.12.1"
+  - "2.13.0"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 68d02d5f7d544..bb12121cd3d8f 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -11,7 +11,7 @@
 # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.

 # Default ownership for all repo files
-* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah

 /modules/transport-netty4/ @peternied

@@ -24,4 +24,4 @@

 /.github/ @peternied

-/MAINTAINERS.md @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kartg @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
+/MAINTAINERS.md @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index 0159e771f7f80..c9df17bad9576 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -47,6 +47,10 @@ body:
         - Storage:Remote
         - Storage:Snapshots
         - Storage
+        - ShardManagement:Placement
+        - ShardManagement:Performance
+        - ShardManagement:Resiliency
+        - ShardManagement:Insights
     validations:
       required: true
   - type: textarea
diff --git a/.github/ISSUE_TEMPLATE/meta.yml b/.github/ISSUE_TEMPLATE/meta.yml
new file mode 100644
index 0000000000000..b766a26bc3ff2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/meta.yml
@@ -0,0 +1,58 @@
+name: ✨ Meta Issue
+description: An issue that collects other issues together to describe a larger project or activity.
+title: '[META] '
+labels: ['Meta', 'untriaged']
+body:
+  - type: textarea
+    attributes:
+      label: Please describe the end goal of this project
+      description: A clear and concise description of this project/endeavor. This should be understandable to someone with no context.
+      placeholder: Ex. Views is a way to project indices in OpenSearch, these views act as a focal point for describing the underlying data and how the data is accessed. It allows for restricting the scope and filtering the response consistently.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Supporting References
+      description: Please provide links (and descriptions!) to RFCs, design docs, etc
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Issues
+      description: Please create a list of issues that should be tracked by this meta issue, including a short description. The purpose is to provide everyone on the project with an "at a glance" update of the state of the work being tracked. If you use the format "- [ ]" it will put your list into a checklist.
+      placeholder: Ex. - [ ] https://github.com/opensearch-project/security/issues/3888 Add views to the cluster metadata schema
+    validations:
+      required: true
+  - type: dropdown
+    attributes:
+      label: Related component
+      description: Choose a specific OpenSearch component your project belongs to. If you are unsure of which component to select or if the component is not present, select "Other".
+      multiple: false
+      options:
+        - # Empty first option to force selection
+        - Build
+        - Clients
+        - Cluster Manager
+        - Extensions
+        - Indexing:Performance
+        - Indexing:Replication
+        - Indexing
+        - Libraries
+        - Other
+        - Plugins
+        - Search:Aggregations
+        - Search:Performance
+        - Search:Query Capabilities
+        - Search:Query Insights
+        - Search:Relevance
+        - Search:Remote Search
+        - Search:Resiliency
+        - Search:Searchable Snapshots
+        - Search
+        - Storage:Durability
+        - Storage:Performance
+        - Storage:Remote
+        - Storage:Snapshots
+        - Storage
+    validations:
+      required: true
diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml
index d6c65ddd446cd..b2f22a90938cc 100644
--- a/.github/workflows/check-compatibility.yml
+++ b/.github/workflows/check-compatibility.yml
@@ -53,7 +53,7 @@ jobs:
           name: results.txt

       - name: Find Comment
-        uses: peter-evans/find-comment@v2
+        uses: peter-evans/find-comment@v3
         id: fc
         with:
           issue-number: ${{ github.event.number }}
           body-includes: 'Compatibility status:'

       - name: Add comment on the PR
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
         with:
           comment-id: ${{ steps.fc.outputs.comment-id }}
           issue-number: ${{ github.event.number }}
diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml
index df63847f8afca..b45e053cc25c2 100644
--- a/.github/workflows/create-documentation-issue.yml
+++ b/.github/workflows/create-documentation-issue.yml
@@ -29,7 +29,7 @@ jobs:
       - name: Create Issue From File
         id: create-issue
-        uses: peter-evans/create-issue-from-file@v4
+        uses: peter-evans/create-issue-from-file@v5
         with:
           title: Add documentation related to new feature
           content-filepath: ./ci/documentation/issue.md
diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index 8c33d41c6b2b4..1f5c187c28e7d 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -72,13 +72,13 @@ jobs:
       - name: Upload Coverage Report
         if: success()
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           files: ./codeCoverage.xml

       - name: Create Comment Success
         if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
         with:
           issue-number: ${{ env.pr_number }}
           body: |
@@ -101,7 +101,7 @@
       - name: Create Comment Flaky
         if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
         with:
           issue-number: ${{ env.pr_number }}
           body: |
@@ -111,7 +111,7 @@
       - name: Create Comment Failure
         if: ${{ github.event_name == 'pull_request_target' && failure() }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
         with:
           issue-number: ${{ env.pr_number }}
           body: |
diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 61962c91b4903..1c83821e22804 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -13,7 +13,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: lychee Link Checker
         id: lychee
-        uses: lycheeverse/lychee-action@v1.9.1
+        uses: lycheeverse/lychee-action@v1.9.3
         with:
           args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes
           fail: true
diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml
index 34e8f57cc1878..fdc2bf16937b4 100644
--- a/.github/workflows/maintainer-approval.yml
+++ b/.github/workflows/maintainer-approval.yml
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - id: find-maintainers
-        uses: actions/github-script@v7
+        uses: actions/github-script@v7.0.1
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          result-encoding: string
diff --git a/.github/workflows/poc-checklist.yml b/.github/workflows/poc-checklist.yml
index 3d014e000a487..1b4f6b31e02f8 100644
--- a/.github/workflows/poc-checklist.yml
+++ b/.github/workflows/poc-checklist.yml
@@ -11,7 +11,7 @@ jobs:
       issues: write
     steps:
       - name: Add comment
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
         with:
           issue-number: ${{ github.event.issue.number }}
           body: |
diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml
index 11998e36c2dbb..7efcf529588ed 100644
--- a/.github/workflows/pull-request-checks.yml
+++ b/.github/workflows/pull-request-checks.yml
@@ -17,7 +17,7 @@ jobs:
     name: Verify Description Checklist
     runs-on: ubuntu-latest
     steps:
-      - uses: peternied/check-pull-request-description-checklist@v1
+      - uses: peternied/check-pull-request-description-checklist@v1.1
        with:
          checklist-items: |
            New functionality includes testing.
diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml
index c305818bdb0a9..83bf4926a8c2d 100644
--- a/.github/workflows/triage.yml
+++ b/.github/workflows/triage.yml
@@ -9,7 +9,7 @@ jobs:
     if: github.repository == 'opensearch-project/OpenSearch'
     runs-on: ubuntu-latest
    steps:
-      - uses: actions/github-script@v7
+      - uses: actions/github-script@v7.0.1
        with:
          script: |
            const { issue, repository } = context.payload;
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index a20c671c137b2..be2a89ac931e9 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -1,28 +1,32 @@
 name: Increment Version
 on:
+  workflow_dispatch:
+    inputs:
+      tag:
+        description: 'the tag'
+        required: true
+        type: string
   push:
     tags:
       - '*.*.*'

-permissions: {}
+permissions:
+  contents: write
+  issues: write
+  pull-requests: write
+
 jobs:
   build:
     if: github.repository == 'opensearch-project/OpenSearch'
     runs-on: ubuntu-latest
     steps:
-      - name: GitHub App token
-        id: github_app_token
-        uses: tibdex/github-app-token@v2.1.0
-        with:
-          app_id: ${{ secrets.APP_ID }}
-          private_key: ${{ secrets.APP_PRIVATE_KEY }}
-          installation_id: 22958780
-
-      - uses: actions/checkout@v4
-      - name: Fetch Tag and Version Information
+      - name: Fetch tag and version information
         run: |
           TAG=$(echo "${GITHUB_REF#refs/*/}")
+          if [ -n "${{ github.event.inputs.tag }}" ]; then
+            TAG=${{ github.event.inputs.tag }}
+          fi
           CURRENT_VERSION_ARRAY=($(echo "$TAG" | tr . '\n'))
           BASE=$(IFS=. ; echo "${CURRENT_VERSION_ARRAY[*]:0:2}")
           BASE_X=$(IFS=. ; echo "${CURRENT_VERSION_ARRAY[*]:0:1}.x")
@@ -44,24 +48,22 @@ jobs:
           echo "NEXT_VERSION=$NEXT_VERSION" >> $GITHUB_ENV
           echo "NEXT_VERSION_UNDERSCORE=$NEXT_VERSION_UNDERSCORE" >> $GITHUB_ENV
           echo "NEXT_VERSION_ID=$NEXT_VERSION_ID" >> $GITHUB_ENV
+
       - uses: actions/checkout@v4
         with:
           ref: ${{ env.BASE }}
-          token: ${{ steps.github_app_token.outputs.token }}

-      - name: Increment Patch Version
-        run: |
-          echo Incrementing $CURRENT_VERSION to $NEXT_VERSION
-          echo " - \"$CURRENT_VERSION\"" >> .ci/bwcVersions
-          sed -i "s/opensearch = $CURRENT_VERSION/opensearch = $NEXT_VERSION/g" buildSrc/version.properties
-          echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
-          sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
-          sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java
+      - name: Increment Patch Version on Major.Minor branch
+        uses: peternied/opensearch-core-version-updater@v1
+        with:
+          previous-version: ${{ env.CURRENT_VERSION }}
+          new-version: ${{ env.NEXT_VERSION }}
+          update-current: true

-      - name: Create Pull Request
+      - name: Create PR for BASE
+        id: base_pr
         uses: peter-evans/create-pull-request@v5
         with:
-          token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE }}
           branch: 'create-pull-request/patch-${{ env.BASE }}'
           commit-message: Increment version to ${{ env.NEXT_VERSION }}
@@ -76,19 +78,18 @@ jobs:
       - uses: actions/checkout@v4
         with:
           ref: ${{ env.BASE_X }}
-          token: ${{ steps.github_app_token.outputs.token }}

-      - name: Add bwc version to .X branch
-        run: |
-          echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION
-          sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions
-          echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
-          sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
+      - name: Add Patch Version on Major.X branch
+        uses: peternied/opensearch-core-version-updater@v1
+        with:
+          previous-version: ${{ env.CURRENT_VERSION }}
+          new-version: ${{ env.NEXT_VERSION }}
+          update-current: false

-      - name: Create Pull Request
+      - name: Create PR for BASE_X
+        id: base_x_pr
         uses: peter-evans/create-pull-request@v5
         with:
-          token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE_X }}
           branch: 'create-pull-request/patch-${{ env.BASE_X }}'
           commit-message: Add bwc version ${{ env.NEXT_VERSION }}
@@ -103,19 +104,18 @@ jobs:
       - uses: actions/checkout@v4
         with:
           ref: main
-          token: ${{ steps.github_app_token.outputs.token }}

-      - name: Add bwc version to main branch
-        run: |
-          echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION
-          sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions
-          echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
-          sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
+      - name: Add Patch Version on main branch
+        uses: peternied/opensearch-core-version-updater@v1
+        with:
+          previous-version: ${{ env.CURRENT_VERSION }}
+          new-version: ${{ env.NEXT_VERSION }}
+          update-current: false

-      - name: Create Pull Request
+      - name: Create PR for main
+        id: main_pr
         uses: peter-evans/create-pull-request@v5
         with:
-          token: ${{ steps.github_app_token.outputs.token }}
           base: main
           branch: 'create-pull-request/patch-main'
           commit-message: Add bwc version ${{ env.NEXT_VERSION }}
@@ -126,3 +126,32 @@ jobs:
           title: '[AUTO] [main] Add bwc version ${{ env.NEXT_VERSION }}.'
           body: |
             I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}.
+
+      - name: Create tracking issue
+        id: create-issue
+        uses: actions/github-script@v7.0.1
+        with:
+          script: |
+            const body = `
+            ### Description
+            A new version of OpenSearch was released. To prepare for the next release, new version numbers need to be updated in all active branches of development.
+
+            ### Exit Criteria
+            Review and merge the following pull requests
+            - [ ] ${{ steps.base_pr.outputs.pull-request-url }}
+            - [ ] ${{ steps.base_x_pr.outputs.pull-request-url }}
+            - [ ] ${{ steps.main_pr.outputs.pull-request-url }}
+
+            ### Additional Context
+            See project wide guidance on branching and versions [[link]](https://github.com/opensearch-project/.github/blob/main/RELEASING.md).
+            `
+            const { data: issue } = await github.rest.issues.create({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              labels: ["Build"],
+              title: "Increment version for ${{ env.NEXT_VERSION }}",
+              body: body
+            });
+            console.error(JSON.stringify(issue));
+            return issue.number;
+          result-encoding: string
diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml
index 6dd48ca15eaa9..dcf2a09717d28 100644
--- a/.github/workflows/wrapper.yml
+++ b/.github/workflows/wrapper.yml
@@ -8,4 +8,4 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: gradle/wrapper-validation-action@v1
+      - uses: gradle/wrapper-validation-action@v2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9ff8dcf623681..c4558cf3fe251 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,10 +11,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854))
 - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664))
 - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
-- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286))
 - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800))
 - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625))
-- [Admission Control] Integrate CPU AC with ResourceUsageCollector and add CPU AC stats to nodes/stats ([#10887](https://github.com/opensearch-project/OpenSearch/pull/10887))
+- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028))
+- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957))
+- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986))
+- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561))
+- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880))
+- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103))

 ### Dependencies
 - Bump `log4j-core` from 2.18.0 to 2.19.0
@@ -22,7 +26,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `avro` from 1.11.1 to 1.11.2
 - Bump `woodstox-core` from 6.3.0 to 6.3.1
 - Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354))
-- Bump `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447))
 - Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488))
 - Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550))
 - Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973))
@@ -47,6 +50,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147))
 - Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617))
 - Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305))
+- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
+- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
+- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317))
+- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611))
+- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251))

 ### Changed
 - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948))
@@ -58,7 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/))
 - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894))
 - Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728))
-- Added support for Google Application Default Credentials in repository-gcs ([#8394](https://github.com/opensearch-project/OpenSearch/pull/8394))
+- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497))

 ### Deprecated

@@ -85,153 +93,76 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439))
 - Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836))
 - Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872))
+- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035))
+- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005))
+- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328))

 ### Security

 ## [Unreleased 2.x]
 ### Added
-- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890))
-- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541))
-- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557))
-- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405))
-- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351))
-- [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567))
-- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
-- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535))
-- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
-- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814))
-- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255))
-- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351))
-- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670))
-- Remove ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967))
-- [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275))
-- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753))
-- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853))
-- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247))
-- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248))
-- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352))
-- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679))
-- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
-- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672))
-- Enable must_exist parameter for update aliases API ([#11210](https://github.com/opensearch-project/OpenSearch/pull/11210))
-- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024))
-- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650))
-- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040))
-- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307))
-- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329))
-- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317))
-- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069))
-- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175))
-- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378))
-- Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170))
-- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591))
-- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583))
-- Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039))
-- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870))
-- Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170))
-- Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891))
+- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874))
+- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441))
+- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121))
+- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268)
+- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163))
+- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626))
+- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063))
+- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269))
+- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533))
+- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835))
+- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586))
+- [Metrics Framework] Adds support for asynchronous gauge metric type. ([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642))
+- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601))
+- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542))
+- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625))
+- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709))
+- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583))

 ### Dependencies
-- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822))
-- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
-- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560), [#11796](https://github.com/opensearch-project/OpenSearch/pull/11796))
-- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
-- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630))
-- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
-- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
-- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639))
-- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
-- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635))
-- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632))
-- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695))
-- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504))
-- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171))
-- Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271))
-- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273))
-- Bump `netty` from 4.1.100.Final to 4.1.104.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775))
-- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692))
-- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861))
-- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344))
-- Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350))
-- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629))
-- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447))
-- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450))
-- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445))
-- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448))
-- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521))
-- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539))
-- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555))
-- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556))
-- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557))
-- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671))
-- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996))
-- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559))
-- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691))
-- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693))
-- Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798))
-- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887))
-- Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421))
-- Bump `com.networknt:json-schema-validator` from 1.0.86 to 1.1.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886))
-- Bump `com.google.api:gax-httpjson` from 0.103.1 to 2.39.0 ([#11794](https://github.com/opensearch-project/OpenSearch/pull/11794))
+- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288))
+- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.33.0 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289))
+- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290))
+- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367))
+- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372))
+- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618))
+- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392))
+- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587))
+- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445))
+- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461))
+- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462))
+- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521))
+- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520))
+- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519))
+- Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585))
+- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580))
+- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579))
+- Bump Jackson version from 2.16.1 to 2.17.0 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611), [#12662](https://github.com/opensearch-project/OpenSearch/pull/12662))
+- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251))
+- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633))
+- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633))

 ### Changed
-- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
-- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036))
-- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)), ([#11751](https://github.com/opensearch-project/OpenSearch/pull/11751))
-- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107))
-- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598))
-- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524))
-- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642))
-- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395))
-- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
-- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273))
-- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607))
-- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))
-- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023))
-- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083))
-- Apply the fast filter optimization to composite aggregation of date histogram source ([#11505](https://github.com/opensearch-project/OpenSearch/pull/11083))
-- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087))
-- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528))
-- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209))
-- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312))
-- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308))
-- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362))
-- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512))
-- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562))
-- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678))
-- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582))
-- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732))
-- Added Support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526))
-- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890))
+- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499))
+- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643))
+- Mark fuzzy filter GA and remove experimental setting ([12631](https://github.com/opensearch-project/OpenSearch/pull/12631))

 ### Deprecated

 ### Removed
-- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956))

 ### Fixed
-- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255))
-- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101))
-- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
-- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
-- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737))
-- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543))
-- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934))
-- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068))
-- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873))
-- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249))
-- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316))
-- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369))
-- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167))
-- Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425))
-- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238))
-- Fix noop_update_total metric in indexing stats cannot be updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485))
-- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152))
-- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417))
-- Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609))
-- Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711))
-- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490))
-- Fix parsing of single line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815))
+- Fix for deserialization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679))
+- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035))
+- Add support of special WrappingSearchAsyncActionPhase so the onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293))
+- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298))
+- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481))
+- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919))
+- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503))
+- Warn about deprecated and ignored index.mapper.dynamic index setting ([#11193](https://github.com/opensearch-project/OpenSearch/pull/11193))
+- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499))
+- Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531))
+- onShardResult and onShardFailure are executed on one shard causes opensearch jvm crashed ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158))

 ### Security
diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index 21adbb0305ab1..f0851fc58d444 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -348,7 +348,7 @@ Please follow these formatting guidelines:
 * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail.
 * If *absolutely* necessary, you can disable formatting for regions of code with the `// tag::NAME` and `// end::NAME` directives, but note that these are intended for use in documentation, so please make it clear what you have done, and only do this where the benefit clearly outweighs the decrease in consistency.
 * Note that JavaDoc and block comments i.e. `/* ... */` are not formatted, but line comments i.e `// ...` are.
-* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, if might get called out in PR reviews as something to change.
+* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, it might get called out in PR reviews as something to change.

 ## Adding Dependencies
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 42a8a439445ca..5535c2fa26eae 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -16,7 +16,6 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock)             | Amazon |
 | Gaurav Bafna             | [gbbafna](https://github.com/gbbafna)           | Amazon |
 | Himanshu Setia           | [setiah](https://github.com/setiah)             | Amazon |
-| Kartik Ganesh            | [kartg](https://github.com/kartg)               | Amazon |
 | Kunal Kotwani            | [kotwanikunal](https://github.com/kotwanikunal) | Amazon |
 | Marc Handalian           | [mch2](https://github.com/mch2)                 | Amazon |
 | Michael Froh             | [msfroh](https://github.com/msfroh)             | Amazon |
@@ -40,3 +39,4 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 |-------------------------|---------------------------------------------|-------------|
 | Megha Sai Kavikondala   | [meghasaik](https://github.com/meghasaik)   | Amazon |
 | Xue Zhou                | [xuezhou25](https://github.com/xuezhou25)   | Amazon |
+| Kartik Ganesh           | [kartg](https://github.com/kartg)           | Amazon |
diff --git a/NOTICE.txt b/NOTICE.txt
index 6c7dc983f8c7a..d463b8f28561f 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -10,3 +10,6 @@ Foundation (http://www.apache.org/).

 This product includes software developed by
 Joda.org (http://www.joda.org/).
+
+This product includes software developed by
+Morten Haraldsen (ethlo) (https://github.com/ethlo) under the Apache License, version 2.0.
diff --git a/TRIAGING.md b/TRIAGING.md
index 47cb44a4f5ba2..3917f1e1442b9 100644
--- a/TRIAGING.md
+++ b/TRIAGING.md
@@ -68,7 +68,7 @@ Yes, there are several labels that are used to identify the 'state' of issues fi
 | Outcome | Label | Description | Canned Response |
 |--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
 | Accepted | `-untriaged` | The issue has the details needed to be directed towards area owners. | "Thanks for filing this issue, please feel free to submit a pull request." |
-| Rejected | N/A | The issue will be closed with a reason for why it was rejected. Reasons might include lack of details, or being outside the scope of the project. | "Thanks for creating this issue; however, it isn't being accepted due to {REASON}. Please feel free to re-open after addressing the reason." |
+| Rejected | N/A | The issue will be closed with a reason for why it was rejected. Reasons might include lack of details, or being outside the scope of the project. | "Thanks for creating this issue; however, it isn't being accepted due to {REASON}. Please feel free to open a new issue after addressing the reason." |
 | Area Triage | `+{AREALABEL}` | OpenSearch has many different areas. If it's unclear whether an issue should be accepted, it will be labeled with the area and an owner will be @mentioned for follow-up. | "Thanks for creating this issue; the triage meeting was unsure if this issue should be accepted, @{PERSON} or someone from the area please review and then accept or reject this issue?" |
 | Transfer | N/A | If the issue applies to another repository within the OpenSearch Project, it will be transferred accordingly. | "@opensearch-project/triage, can you please transfer this issue to project {REPOSITORY}." Or, if someone at the meeting has permissions, they can start the transfer. |
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 6b4634c7e791c..be4579b4e5324 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -84,3 +84,45 @@ spotless {
     targetExclude 'src/main/generated/**/*.java'
   }
 }
+
+if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) {
+  // Add support for incubator modules on supported Java versions.
+  run.jvmArgs += ['--add-modules=jdk.incubator.vector']
+  run.classpath += files(jar.archiveFile)
+  run.classpath -= sourceSets.main.output
+  evaluationDependsOn(':libs:opensearch-common')
+
+  sourceSets {
+    java20 {
+      java {
+        srcDirs = ['src/main/java20']
+      }
+    }
+  }
+
+  configurations {
+    java20Implementation.extendsFrom(implementation)
+  }
+
+  dependencies {
+    java20Implementation sourceSets.main.output
+    java20Implementation project(':libs:opensearch-common').sourceSets.java20.output
+    java20AnnotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh"
+  }
+
+  compileJava20Java {
+    targetCompatibility = JavaVersion.VERSION_20
+    options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
+  }
+
+  jar {
+    metaInf {
+      into 'versions/20'
+      from sourceSets.java20.output
+    }
+    manifest.attributes('Multi-Release': 'true')
+  }
+
+  // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes
+  disableTasks('forbiddenApisJava20')
+}
diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java
new file mode 100644
index 0000000000000..4e995f5a5067c
--- /dev/null
+++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterConstructionBenchmark { + + private List<BytesRef> items; + + @Param({ "1000000", "10000000", "50000000" }) + private int numIds; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySetFactory fuzzySetFactory; + private String fieldName; + + @Setup + public void setupIds() { + this.fieldName = IdFieldMapper.NAME; + this.items = IntStream.range(0, numIds).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + this.fuzzySetFactory = new FuzzySetFactory(Map.of(fieldName, parameters)); + } + + @Benchmark + public FuzzySet buildFilter() throws IOException { + return fuzzySetFactory.createFuzzySet(items.size(), fieldName, () -> items.iterator()); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java new file mode 100644 index 0000000000000..383539219830e --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterLookupBenchmark { + + @Param({ "50000000", "1000000" }) + private int numItems; + + @Param({ "1000000" }) + private int searchKeyCount; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySet fuzzySet; + private List<BytesRef> items; + private Random random = new Random(); + + @Setup + public void setupFilter() throws IOException { + String fieldName = IdFieldMapper.NAME; + items = IntStream.range(0, numItems).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + fuzzySet = new FuzzySetFactory(Map.of(fieldName, parameters)).createFuzzySet(numItems, fieldName, () -> items.iterator()); + } + + @Benchmark + public void contains_withExistingKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(items.get(random.nextInt(items.size()))) == FuzzySet.Result.MAYBE); + } + } + + @Benchmark + public void contains_withRandomKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(new BytesRef(UUIDs.base64UUID()))); + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java index 4e07af452968b..3909a3f4eb8fc 100644 --- a/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java @@ -21,7 +21,6 @@ import org.openjdk.jmh.infra.Blackhole; import java.util.Random; -import java.util.function.Supplier; @Fork(value = 3) @Warmup(iterations = 3, time = 1) @@ -83,17 +82,17 @@ public static class Options { "256" }) public Integer size; - @Param({ "binary", "linear" }) + @Param({ "binary", "linear", "btree" }) public String type; @Param({ "uniform", "skewed_edge", "skewed_center" }) public String distribution; public long[] queries; - public Supplier<Roundable> supplier; + public RoundableSupplier supplier; @Setup - public void setup() { + public void setup() throws ClassNotFoundException { Random random = new 
Random(size); long[] values = new long[size]; for (int i = 1; i < values.length; i++) { @@ -128,16 +127,7 @@ public void setup() { throw new IllegalArgumentException("invalid distribution: " + distribution); } - switch (type) { - case "binary": - supplier = () -> new BinarySearcher(values, size); - break; - case "linear": - supplier = () -> new BidirectionalLinearSearcher(values, size); - break; - default: - throw new IllegalArgumentException("invalid type: " + type); - } + supplier = new RoundableSupplier(type, values, size); } private static long nextPositiveLong(Random random) { diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..44ac42810996f --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) throws ClassNotFoundException { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + throw new ClassNotFoundException("BtreeSearcher is not supported below JDK 20"); + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..e81c1b137bd30 --- /dev/null +++ b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + delegate = () -> new BtreeSearcher(values, size); + break; + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/build.gradle b/build.gradle index 296c30391af09..2aac4a1e893e9 100644 --- a/build.gradle +++ b/build.gradle @@ -54,8 +54,8 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.23.2" apply false - id "org.gradle.test-retry" version "1.5.4" apply false + id "com.diffplug.spotless" version "6.25.0" apply false + id "org.gradle.test-retry" version "1.5.8" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -516,7 +516,6 @@ subprojects { includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexClusterDefaultDocRep") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexIT") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexTranslogDisabledIT") - includeClasses.add("org.opensearch.remotestore.RemoteIndexPrimaryRelocationIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreBackpressureIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreRefreshListenerIT") @@ -545,6 +544,7 @@ subprojects { includeClasses.add("org.opensearch.snapshots.SnapshotStatusApisIT") includeClasses.add("org.opensearch.test.rest.ClientYamlTestSuiteIT") includeClasses.add("org.opensearch.upgrade.DetectEsInstallationTaskTests") + includeClasses.add("org.opensearch.cluster.MinimumClusterManagerNodesIT") } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 1cb21acd14af7..0562ecc6ee61b 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -118,7 +118,7 @@ dependencies { api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' - api 'com.networknt:json-schema-validator:1.1.0' + api 'com.networknt:json-schema-validator:1.2.0' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" diff --git a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java index 8ecfbf40b6c62..0c901b9726992 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java @@ -94,7 +94,7 @@ public static void configureRepositories(Project project) { String revision = matcher.group(1); MavenArtifactRepository luceneRepo = repos.maven(repo -> { repo.setName("lucene-snapshots"); - repo.setUrl("https://artifacts.opensearch.org/snapshots/lucene/"); + repo.setUrl("https://ci.opensearch.org/ci/dbc/snapshots/lucene/"); }); repos.exclusiveContent(exclusiveRepo -> { exclusiveRepo.filter( diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 1ad7e056b6ae6..bc44f81a81aff 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin<Project> { - private static final String SYSTEM_JDK_VERSION = "17.0.9+9"; + private static final String SYSTEM_JDK_VERSION = "21.0.2+13"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.9+9"; + private static final String GRADLE_JDK_VERSION = "21.0.2+13"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 351b42e5bc921..48dfb206375ca 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.22.1" + implementation "org.apache.logging.log4j:log4j-core:2.23.1" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index dd7f2e1eaabf0..6da095473b520 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,14 +1,14 @@ opensearch = 3.0.0 -lucene = 9.9.1 +lucene = 9.11.0-snapshot-8a555eb bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.1+12 +bundled_jdk = 21.0.2+13 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.16.1 -jackson_databind = 2.16.1 +jackson = 2.17.0 +jackson_databind = 2.17.0 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 @@ -18,7 +18,7 @@ asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 -antlr4 = 4.11.1 +antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 @@ -26,12 +26,12 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.104.Final +netty = 4.1.107.Final joda = 2.12.2 # project reactor -reactor_netty = 1.1.13 -reactor = 3.5.11 +reactor_netty = 1.1.17 +reactor = 3.5.15 # client dependencies httpclient5 = 5.2.1 @@ -44,18 +44,18 @@ commonscodec = 1.15 commonslang = 3.13.0 commonscompress = 1.24.0 # plugin dependencies -aws = 2.20.55 +aws = 2.20.86 reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.76 +bouncycastle=1.77 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.5.0 +mockito = 5.10.0 objenesis = 3.2 bytebuddy = 1.14.7 @@ -70,5 +70,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.34.1 +opentelemetry = 1.36.0 opentelemetrysemconv = 1.23.1-alpha diff --git a/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 deleted file mode 100644 index 908d071b34a2a..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git 
a/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..9b906dbda1656 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 @@ -0,0 +1 @@ +a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java index adddb3bda725c..f609fae4e3c81 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java @@ -305,6 +305,7 @@ public void shutdown() { } } + @SuppressWarnings("removal") static class SnifferThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java index 88f667549f3e8..faef1441d0a02 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java @@ -51,6 +51,7 @@ protected Matcher<String> nodeNameMatcher() { return is("integTest-0"); } + @SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { assumeFalse("Skipping test because it is being run against an external cluster.", diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index ededa7bff34d8..4e85d19986e43 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.6.0" + id "com.netflix.nebula.ospackage-base" version "11.8.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index b7ab2e1c2309b..10bab9b3fce92 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -117,13 +117,11 @@ ${path.logs} #opensearch.experimental.feature.extensions.enabled: false # # -# Gates the concurrent segment search feature. This feature enables concurrent segment search in a separate -# index searcher threadpool. -# -#opensearch.experimental.feature.concurrent_segment_search.enabled: false -# -# # Gates the optimization of datetime formatters caching along with change in default datetime formatter # Once there is no observed impact on performance, this feature flag can be removed. # #opensearch.experimental.optimization.datetime_formatter_caching.enabled: false +# +# Gates the functionality of enabling OpenSearch to use pluggable caches, with the respective store names selected via settings.
+# +#opensearch.experimental.feature.pluggable.caching.enabled: false diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java index d269603656114..9ca42ac5f4ec1 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java @@ -78,15 +78,14 @@ private void printPlugin(Environment env, Terminal terminal, Path plugin, String PluginInfo info = PluginInfo.readFromProperties(env.pluginsDir().resolve(plugin)); terminal.println(Terminal.Verbosity.SILENT, prefix + info.getName()); terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!PluginsService.isPluginVersionCompatible(info, Version.CURRENT)) { terminal.errorPrintln( "WARNING: plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getVersion() - + " but version " + + info.getOpenSearchVersionRangesString() + + " and is not compatible with " + Version.CURRENT - + " is required" ); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java index f4532f5f83cc4..c264788df20e8 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java @@ -70,8 +70,10 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.PosixPermissionsResetter; +import org.opensearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -284,6 +286,35 @@ static void writePlugin(String name, Path structure, String... additionalProps) writeJar(structure.resolve("plugin.jar"), className); } + static void writePlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) throws IOException { + String[] properties = Stream.concat( + Stream.of( + "description", + "fake desc", + "name", + name, + "version", + "1.0", + "dependencies", + "{opensearch:\"" + opensearchVersionRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ), + Arrays.stream(additionalProps) + ).toArray(String[]::new); + PluginTestUtil.writePluginProperties(structure, properties); + String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin"; + writeJar(structure.resolve("plugin.jar"), className); + } + + static Path createPlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) + throws IOException { + writePlugin(name, structure, opensearchVersionRange, additionalProps); + return writeZip(structure, null); + } + static void writePluginSecurityPolicy(Path pluginDir, String... 
permissions) throws IOException { StringBuilder securityPolicyContent = new StringBuilder("grant {\n "); for (String permission : permissions) { @@ -867,6 +898,32 @@ public void testInstallMisspelledOfficialPlugins() throws Exception { assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } + public void testInstallPluginWithCompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + Version.CURRENT.toString())).toUri() + .toURL() + .toString(); + skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()); + assertThat(terminal.getOutput(), containsString("100%")); + } + + public void testInstallPluginWithIncompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + // Core version is behind plugin version by one w.r.t patch, hence incompatible + Version coreVersion = Version.CURRENT; + Version pluginVersion = VersionUtils.getVersion(coreVersion.major, coreVersion.minor, (byte) (coreVersion.revision + 1)); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + pluginVersion.toString())).toUri() + .toURL() + .toString(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()) + ); + assertThat(e.getMessage(), containsString("Plugin [fake] was built for OpenSearch version ~" + pluginVersion)); + } + public void testBatchFlag() throws Exception { MockTerminal terminal = new MockTerminal(); installPlugin(terminal, true); diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java index 7bbced38c7adb..6878efce4c804 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java @@ -278,7 +278,7 @@ public void testExistingIncompatiblePlugin() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home); - String message = "plugin [fake_plugin1] was built for OpenSearch version 1.0 but version " + Version.CURRENT + " is required"; + String message = "plugin [fake_plugin1] was built for OpenSearch version 5.0.0 and is not compatible with " + Version.CURRENT; assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); assertEquals("WARNING: " + message + "\n", terminal.getErrorOutput()); @@ -286,4 +286,41 @@ public void testExistingIncompatiblePlugin() throws Exception { terminal = listPlugins(home, params); assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); } + + public void testPluginWithDependencies() throws Exception { + PluginTestUtil.writePluginProperties( + env.pluginsDir().resolve("fake_plugin1"), + "description", + "fake desc 1", + "name", + "fake_plugin1", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "org.fake1" + ); + String[] params = { "-v" }; + MockTerminal terminal = listPlugins(home, params); + assertEquals( + buildMultiline( + "Plugins 
directory: " + env.pluginsDir(), + "fake_plugin1", + "- Plugin information:", + "Name: fake_plugin1", + "Description: fake desc 1", + "Version: 1.0", + "OpenSearch Version: " + Version.CURRENT.toString(), + "Java Version: " + System.getProperty("java.specification.version"), + "Native Controller: false", + "Extended Plugins: []", + " * Classname: org.fake1", + "Folder name: null" + ), + terminal.getOutput() + ); + } } diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index 822b471e2e034..3ca6b1fe84ea7 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -13,7 +13,7 @@ repositories { gradlePluginPortal() // TODO: Find the way to use the repositories from RepositoriesSetupPlugin maven { - url = "https://artifacts.opensearch.org/snapshots/lucene/" + url = "https://ci.opensearch.org/ci/dbc/snapshots/lucene/" } } @@ -37,7 +37,7 @@ tasks.withType(JacocoReport).configureEach { if (System.getProperty("tests.coverage")) { reporting { reports { - testCodeCoverageReport(JacocoCoverageReport) { + testCodeCoverageReport(JacocoCoverageReport) { testType = TestSuiteType.UNIT_TEST } } @@ -45,6 +45,6 @@ if (System.getProperty("tests.coverage")) { // Attach code coverage report task to Gradle check task project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure { - dependsOn tasks.named('testCodeCoverageReport', JacocoReport) + dependsOn tasks.named('testCodeCoverageReport', JacocoReport) } } diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index 93e1127c97a56..f3a4bf5cc765b 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -99,7 +99,9 @@ allprojects { } } format 'misc', { - target '*.md', '*.gradle', '**/*.yaml', '**/*.yml', '**/*.svg' + target '*.md', '*.gradle', '**/*.json', '**/*.yaml', '**/*.yml', '**/*.svg' + + targetExclude '**/simple-bulk11.json', '**/simple-msearch5.json' trimTrailingWhitespace() endWithNewline() diff --git 
a/gradle/ide.gradle b/gradle/ide.gradle index bc442a081adf0..14d6b2982ccd0 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -82,6 +82,9 @@ if (System.getProperty('idea.active') == 'true') { runConfigurations { defaults(JUnit) { vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { + vmParameters += ' -Djava.security.manager=allow' + } } } copyright { diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f1d76d80bbfa3..82a4add334a7d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=c16d517b50dd28b3f5838f0e844b7520b8f1eb610f2f29de7e4e04a1b7c9c79b +distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 4f89b81636420..60bf488833393 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -43,3 +43,64 @@ tasks.named('forbiddenApisMain').configure { // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server replaceSignatureFiles 'jdk-signatures' } + +// Add support for incubator modules on supported Java versions. +if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { + sourceSets { + java20 { + java { + srcDirs = ['src/main/java20'] + } + } + } + + configurations { + java20Implementation.extendsFrom(implementation) + } + + dependencies { + java20Implementation sourceSets.main.output + } + + compileJava20Java { + targetCompatibility = JavaVersion.VERSION_20 + options.compilerArgs += ['--add-modules', 'jdk.incubator.vector'] + options.compilerArgs -= '-Werror' // use of incubator modules is reported as a warning + } + + jar { + metaInf { + into 'versions/20' + from sourceSets.java20.output + } + manifest.attributes('Multi-Release': 'true') + } + + tasks.withType(Test).configureEach { + // Relying on the convention for Test.classpath in custom Test tasks has been deprecated + // and scheduled to be removed in Gradle 9.0. Below lines are added from the migration guide: + // https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#test_task_default_classpath + testClassesDirs = testing.suites.test.sources.output.classesDirs + classpath = testing.suites.test.sources.runtimeClasspath + + // Adds the multi-release JAR to the classpath when executing tests. + // This allows newer sources to be picked up at test runtime (if supported). + classpath += files(jar.archiveFile) + // Removes the "main" sources from the classpath to avoid JarHell problems as + // the multi-release JAR already contains those classes. 
+ classpath -= sourceSets.main.output + } + + tasks.register('roundableSimdTest', Test) { + group 'verification' + include '**/RoundableTests.class' + systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced' + } + + check.dependsOn(roundableSimdTest) + + forbiddenApisJava20 { + failOnMissingClasses = false + ignoreSignaturesOfMissingClasses = true + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index 0f289c09bbae2..60c0717a28f05 100644 --- a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -52,7 +52,7 @@ public static boolean isInetAddress(String ipString) { return ipStringToBytes(ipString) != null; } - private static byte[] ipStringToBytes(String ipString) { + public static byte[] ipStringToBytes(String ipString) { // Make a first pass to categorize the characters in this string. boolean hasColon = false; boolean hasDot = false; diff --git a/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java new file mode 100644 index 0000000000000..626fb6e6b810e --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import jdk.incubator.vector.LongVector; +import jdk.incubator.vector.Vector; +import jdk.incubator.vector.VectorOperators; +import jdk.incubator.vector.VectorSpecies; + +/** + * It uses vectorized B-tree search to find the round-down point. + * + * @opensearch.internal + */ +@InternalApi +class BtreeSearcher implements Roundable { + private static final VectorSpecies<Long> LONG_VECTOR_SPECIES = LongVector.SPECIES_PREFERRED; + private static final int LANES = LONG_VECTOR_SPECIES.length(); + private static final int SHIFT = log2(LANES); + + private final long[] values; + private final long minValue; + + BtreeSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int blocks = (size + LANES - 1) / LANES; // number of blocks + int length = 1 + blocks * LANES; // size of the backing array (1-indexed) + + this.minValue = values[0]; + this.values = new long[length]; + build(values, 0, size, this.values, 1); + } + + /** + * Builds the B-tree memory layout. + * It builds the tree recursively, following an in-order traversal. + * + * <p> + * Each block stores 'lanes' values at indices {@code i, i + 1, ..., i + lanes - 1} where {@code i} is the + * starting offset. The starting offset of the root block is 1. The branching factor is (1 + lanes) so each + * block can have that many children. Given the starting offset {@code i} of a block, the starting offset + * of its k-th child (with {@code k} ranging over {@code 0, 1, ..., lanes}) can be computed as {@code i + ((i + k) << shift)}.
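+ * <p> + * As a worked illustration (values follow from the formula above): with {@code lanes = 4} and hence {@code shift = 2}, the root block occupies indices {@code 1..4}, and its children ({@code k = 0, 1, ..., 4}) start at offsets {@code 5, 9, 13, 17, 21}.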
+ * + * @param src is the sorted input array + * @param i is the index in the input array to read the value from + * @param size the number of values in the input array + * @param dst is the output array + * @param j is the index in the output array to write the value to + * @return the next index 'i' + */ + private static int build(long[] src, int i, int size, long[] dst, int j) { + if (j < dst.length) { + for (int k = 0; k < LANES; k++) { + i = build(src, i, size, dst, j + ((j + k) << SHIFT)); + + // Fills the B-tree as a complete tree, i.e., all levels are completely filled, + // except the last level which is filled from left to right. + // The trick is to fill the destination array between indices 1...size (inclusive / 1-indexed) + // and pad the remaining array with +infinity. + dst[j + k] = (j + k <= size) ? src[i++] : Long.MAX_VALUE; + } + i = build(src, i, size, dst, j + ((j + LANES) << SHIFT)); + } + return i; + } + + @Override + public long floor(long key) { + Vector<Long> keyVector = LongVector.broadcast(LONG_VECTOR_SPECIES, key); + int i = 1, result = 1; + + while (i < values.length) { + Vector<Long> valuesVector = LongVector.fromArray(LONG_VECTOR_SPECIES, values, i); + int j = i + valuesVector.compare(VectorOperators.GT, keyVector).firstTrue(); + result = (j > i) ? j : result; + i += (j << SHIFT); + } + + assert result > 1 : "key must be greater than or equal to " + minValue; + return values[result - 1]; + } + + private static int log2(int num) { + if ((num <= 0) || ((num & (num - 1)) != 0)) { + throw new IllegalArgumentException(num + " is not a positive power of 2"); + } + return 32 - Integer.numberOfLeadingZeros(num - 1); + } +} diff --git a/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..0709ed4374227 --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary or B-tree search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: <a href="https://github.com/opensearch-project/OpenSearch/pull/9727">PR #9727</a> + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + /** + * Indicates whether the vectorized (SIMD) B-tree search implementation is to be used. + * It is true when either: + * 1. The feature flag is set to "forced", or + * 2. The platform has a minimum of 4 long vector lanes and the feature flag is set to "true". 
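+ * <p> + * The feature flag here is the {@code opensearch.experimental.feature.simd.rounding.enabled} system property; the {@code roundableSimdTest} task in {@code libs/common} runs the rounding tests with it set to {@code forced}.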
+ */ + private static final boolean USE_BTREE_SEARCHER; + + /** + * This class is initialized only when: + * - JDK-20+ + * - jdk.incubator.vector.LongVector is available (--add-modules=jdk.incubator.vector is passed) + */ + private static final class VectorCheck { + final static int SPECIES_PREFERRED = jdk.incubator.vector.LongVector.SPECIES_PREFERRED.length(); + } + + static { + String simdRoundingFeatureFlag = System.getProperty("opensearch.experimental.feature.simd.rounding.enabled"); + boolean useBtreeSearcher = false; + + try { + final Class<?> incubator = Class.forName("jdk.incubator.vector.LongVector"); + + useBtreeSearcher = "forced".equalsIgnoreCase(simdRoundingFeatureFlag) + || (VectorCheck.SPECIES_PREFERRED >= 4 && "true".equalsIgnoreCase(simdRoundingFeatureFlag)); + + } catch (final ClassNotFoundException ex) { + /* do not use BtreeSearcher */ + } + + USE_BTREE_SEARCHER = useBtreeSearcher; + } + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else if (USE_BTREE_SEARCHER) { + return new BtreeSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java index dcf8dd7945012..c8fdb3333a714 100644 --- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java @@ -43,6 +43,7 @@ default CompilerResult compile(String name, String... names) { return compileWithPackage(ApiAnnotationProcessorTests.class.getPackageName(), name, names); } + @SuppressWarnings("removal") default CompilerResult compileWithPackage(String pck, String name, String... 
names) { final JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); final DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>(); diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java index ae9f629c59024..ad19f456b0df4 100644 --- a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java +++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java @@ -12,15 +12,31 @@ public class RoundableTests extends OpenSearchTestCase { - public void testFloor() { - int size = randomIntBetween(1, 256); - long[] values = new long[size]; - for (int i = 1; i < values.length; i++) { - values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; - } + public void testRoundingEmptyArray() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> RoundableFactory.create(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + } + + public void testRoundingSmallArray() { + int size = randomIntBetween(1, 64); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + + assertEquals("BidirectionalLinearSearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } - Roundable[] impls = { new BinarySearcher(values, size), new BidirectionalLinearSearcher(values, size) }; + public void testRoundingLargeArray() { + int size = randomIntBetween(65, 256); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + boolean useBtreeSearcher = "forced".equalsIgnoreCase(System.getProperty("opensearch.experimental.feature.simd.rounding.enabled")); + assertEquals(useBtreeSearcher ? "BtreeSearcher" : "BinarySearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } + + private void assertRounding(Roundable roundable, long[] values, int size) { for (int i = 0; i < 100000; i++) { // Index of the expected round-down point. int idx = randomIntBetween(0, size - 1); @@ -35,23 +51,21 @@ public void testFloor() { // round-down point, which will still floor to the same value. long key = expected + (randomNonNegativeLong() % delta); - for (Roundable roundable : impls) { - assertEquals(expected, roundable.floor(key)); - } + assertEquals(expected, roundable.floor(key)); } + + Throwable throwable = assertThrows(AssertionError.class, () -> roundable.floor(values[0] - 1)); + assertEquals("key must be greater than or equal to " + values[0], throwable.getMessage()); } - public void testFailureCases() { - Throwable throwable; + private static long[] randomArrayOfSortedValues(int size) { + int capacity = size + randomInt(20); // May be slightly more than the size. 
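+ // Entries beyond 'size' remain zero; a correct Roundable implementation must respect the 'size' argument and never read them.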
+ long[] values = new long[capacity]; - throwable = assertThrows(IllegalArgumentException.class, () -> new BinarySearcher(new long[0], 0)); - assertEquals("at least one value must be present", throwable.getMessage()); - throwable = assertThrows(IllegalArgumentException.class, () -> new BidirectionalLinearSearcher(new long[0], 0)); - assertEquals("at least one value must be present", throwable.getMessage()); + for (int i = 1; i < size; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; + } - throwable = assertThrows(AssertionError.class, () -> new BinarySearcher(new long[] { 100 }, 1).floor(50)); - assertEquals("key must be greater than or equal to 100", throwable.getMessage()); - throwable = assertThrows(AssertionError.class, () -> new BidirectionalLinearSearcher(new long[] { 100 }, 1).floor(50)); - assertEquals("key must be greater than or equal to 100", throwable.getMessage()); + return values; } } diff --git a/libs/core/licenses/jackson-core-2.16.1.jar.sha1 b/libs/core/licenses/jackson-core-2.16.1.jar.sha1 deleted file mode 100644 index 908d071b34a2a..0000000000000 --- a/libs/core/licenses/jackson-core-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.17.0.jar.sha1 b/libs/core/licenses/jackson-core-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..9b906dbda1656 --- /dev/null +++ b/libs/core/licenses/jackson-core-2.17.0.jar.sha1 @@ -0,0 +1 @@ +a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.9.1.jar.sha1 b/libs/core/licenses/lucene-core-9.9.1.jar.sha1 deleted file mode 100644 index ae596196d9e6a..0000000000000 --- a/libs/core/licenses/lucene-core-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 6a92993f5dd42..66ba446d4fc54 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -98,8 +98,10 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_1); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_9_1); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_3_0_0 = new Version(3000099, 
org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java index 8cb65c9feb1ca..6b60e7448cd03 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java @@ -82,6 +82,11 @@ static byte[] toBytes(BytesReference reference) { return ArrayUtil.copyOfSubArray(bytesRef.bytes, bytesRef.offset, bytesRef.offset + bytesRef.length); } + static byte[] toBytesWithoutCompact(BytesReference reference) { + final BytesRef bytesRef = reference.toBytesRef(); + return bytesRef.bytes; + } + /** * Returns an array of byte buffers from the given BytesReference. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index 3e996bdee83a2..ea23b3d81a775 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -56,6 +56,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.semver.SemverRange; import java.io.ByteArrayInputStream; import java.io.EOFException; @@ -750,6 +751,8 @@ public Object readGenericValue() throws IOException { return readCollection(StreamInput::readGenericValue, HashSet::new, Collections.emptySet()); case 26: return readBigInteger(); + case 27: + return readSemverRange(); default: throw new IOException("Can't read unknown type [" + type + "]"); } @@ -1090,6 +1093,10 @@ public Version readVersion() throws IOException { return Version.fromId(readVInt()); } + public SemverRange readSemverRange() throws IOException { + return SemverRange.fromString(readString()); + } + /** Reads the {@link Version} from the input stream */ public Build readBuild() throws IOException { // the following is new for opensearch: we write the distribution to support any "forks" diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index 2d69e1c686df3..b7599265aece3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -54,6 +54,7 @@ import org.opensearch.core.common.settings.SecureString; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.semver.SemverRange; import java.io.EOFException; import java.io.FileNotFoundException; @@ -784,6 +785,10 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep o.writeByte((byte) 26); o.writeString(v.toString()); }); + writers.put(SemverRange.class, (o, v) -> { + o.writeByte((byte) 27); + o.writeSemverRange((SemverRange) v); + }); WRITERS = Collections.unmodifiableMap(writers); } @@ -1101,6 +1106,10 @@ public void writeVersion(final Version version) throws IOException { writeVInt(version.id); } + public void writeSemverRange(final SemverRange range) throws IOException { + 
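+ // A range is wire-serialized as its string form, e.g. "~2.13.0", and parsed back by StreamInput#readSemverRange.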
writeString(range.toString()); + } + /** Writes the OpenSearch {@link Build} informn to the output stream */ public void writeBuild(final Build build) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java new file mode 100644 index 0000000000000..da87acc7124aa --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver; + +import org.opensearch.Version; +import org.opensearch.common.Nullable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.expr.Caret; +import org.opensearch.semver.expr.Equal; +import org.opensearch.semver.expr.Expression; +import org.opensearch.semver.expr.Tilde; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Arrays.stream; + +/** + * Represents a single semver range that allows for specifying which {@code org.opensearch.Version}s satisfy the range. + * It is composed of a range version and a range operator. Following are the supported operators: + * <ul> + * <li>'=' Requires exact match with the range version. For example, =1.2.3 range would match only 1.2.3</li> + * <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li> + * <li>'^' Allows for patch and minor version variability starting from the range version. For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li> + * </ul> + */ +public class SemverRange implements ToXContentFragment { + + private final Version rangeVersion; + private final RangeOperator rangeOperator; + + public SemverRange(final Version rangeVersion, final RangeOperator rangeOperator) { + this.rangeVersion = rangeVersion; + this.rangeOperator = rangeOperator; + } + + /** + * Constructs a {@code SemverRange} from its string representation. + * @param range given range + * @return a {@code SemverRange} + */ + public static SemverRange fromString(final String range) { + RangeOperator rangeOperator = RangeOperator.fromRange(range); + String version = range.replaceFirst(rangeOperator.asEscapedString(), ""); + if (!Version.stringHasLength(version)) { + throw new IllegalArgumentException("Version cannot be empty"); + } + return new SemverRange(Version.fromString(version), rangeOperator); + } + + /** + * Return the range operator for this range. + * @return range operator + */ + public RangeOperator getRangeOperator() { + return rangeOperator; + } + + /** + * Return the version for this range. + * @return the range version + */ + public Version getRangeVersion() { + return rangeVersion; + } + + /** + * Check if range is satisfied by given version string. 
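+ * For example, the range {@code ~2.3.0} is satisfied by version {@code 2.3.4} but not by {@code 2.4.0}.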
+ * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + */ + public boolean isSatisfiedBy(final String versionToEvaluate) { + return isSatisfiedBy(Version.fromString(versionToEvaluate)); + } + + /** + * Check if range is satisfied by given version. + * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + * @see #isSatisfiedBy(String) + */ + public boolean isSatisfiedBy(final Version versionToEvaluate) { + return this.rangeOperator.expression.evaluate(this.rangeVersion, versionToEvaluate); + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SemverRange range = (SemverRange) o; + return Objects.equals(rangeVersion, range.rangeVersion) && rangeOperator == range.rangeOperator; + } + + @Override + public int hashCode() { + return Objects.hash(rangeVersion, rangeOperator); + } + + @Override + public String toString() { + return rangeOperator.asString() + rangeVersion; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder.value(toString()); + } + + /** + * A range operator. + */ + public enum RangeOperator { + + EQ("=", new Equal()), + TILDE("~", new Tilde()), + CARET("^", new Caret()), + DEFAULT("", new Equal()); + + private final String operator; + private final Expression expression; + + RangeOperator(final String operator, final Expression expression) { + this.operator = operator; + this.expression = expression; + } + + /** + * String representation of the range operator. + * + * @return range operator as string + */ + public String asString() { + return operator; + } + + /** + * Escaped string representation of the range operator, + * if the operator is a regex metacharacter. + * + * @return range operator as escaped string, if the operator is a regex metacharacter + */ + public String asEscapedString() { + if (Objects.equals(operator, "^")) { + return "\\^"; + } + return operator; + } + + public static RangeOperator fromRange(final String range) { + Optional<RangeOperator> rangeOperator = stream(values()).filter( + operator -> operator != DEFAULT && range.startsWith(operator.asString()) + ).findFirst(); + return rangeOperator.orElse(DEFAULT); + } + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java new file mode 100644 index 0000000000000..ce2b74dde0865 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing for minor and patch version variability. + */ +public class Caret implements Expression { + + /** + * Checks if the given version is compatible with the range version allowing for minor and + * patch version variability. + * Allows all versions starting from the rangeVersion up to the next major version (exclusive).
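+ * For example, {@code ^1.2.3} matches {@code 1.2.4} and {@code 1.9.0} but not {@code 2.0.0}.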
diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java new file mode 100644 index 0000000000000..ce2b74dde0865 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing for minor and patch version variability. + */ +public class Caret implements Expression { + + /** + * Checks if the given version is compatible with the range version allowing for minor and + * patch version variability. + * Allows all versions starting from the rangeVersion up to the next major version (exclusive). + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible, {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString((rangeVersion.major + 1) + ".0.0"); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java new file mode 100644 index 0000000000000..d3e1d63060b77 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate equality of versions. + */ +public class Equal implements Expression { + + /** + * Checks if a given version matches a certain range version. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are equal, {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + return versionToEvaluate.equals(rangeVersion); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java new file mode 100644 index 0000000000000..68bb4e249836a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * An evaluation expression. + */ +public interface Expression { + + /** + * Evaluates an expression. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return the result of the expression evaluation + */ + boolean evaluate(final Version rangeVersion, final Version versionToEvaluate); +}
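For intuition, the two variability expressions bound a half-open interval. A small sketch against the evaluate contract above, together with Tilde, which is defined next; the version literals are illustrative:

Caret caret = new Caret(); // ^1.2.3 admits [1.2.3, 2.0.0)
Tilde tilde = new Tilde(); // ~1.2.3 admits [1.2.3, 1.3.0)
assert caret.evaluate(Version.fromString("1.2.3"), Version.fromString("1.9.0"));
assert tilde.evaluate(Version.fromString("1.2.3"), Version.fromString("1.9.0")) == false;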
diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java new file mode 100644 index 0000000000000..5f62ffe62ddeb --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing patch version variability. + */ +public class Tilde implements Expression { + + /** + * Checks if the given version is compatible with a range version allowing for patch version variability. + * Allows all versions starting from the rangeVersion up to the next minor version (exclusive). + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible, {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString(rangeVersion.major + "." + (rangeVersion.minor + 1) + "." + 0); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java new file mode 100644 index 0000000000000..06cf9feaaaf8f --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** Expressions library module */ +package org.opensearch.semver.expr; diff --git a/server/src/main/java/org/opensearch/common/cache/tier/package-info.java b/libs/core/src/main/java/org/opensearch/semver/package-info.java similarity index 70% rename from server/src/main/java/org/opensearch/common/cache/tier/package-info.java rename to libs/core/src/main/java/org/opensearch/semver/package-info.java index 7ad81dbe3073c..ada935582d408 100644 --- a/server/src/main/java/org/opensearch/common/cache/tier/package-info.java +++ b/libs/core/src/main/java/org/opensearch/semver/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Base package for cache tier support. */ -package org.opensearch.common.cache.tier; +/** Semver library module */ +package org.opensearch.semver; diff --git a/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java new file mode 100644 index 0000000000000..af1d95b2561b7 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.semver; + +import org.opensearch.test.OpenSearchTestCase; + +public class SemverRangeTests extends OpenSearchTestCase { + + public void testRangeWithEqualsOperator() { + SemverRange range = SemverRange.fromString("=1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.EQ); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithDefaultOperator() { + SemverRange range = SemverRange.fromString("1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.DEFAULT); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithTildeOperator() { + SemverRange range = SemverRange.fromString("~2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.TILDE); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.3.12")); + + assertFalse(range.isSatisfiedBy("2.3.0")); + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("2.4.0")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testRangeWithCaretOperator() { + SemverRange range = SemverRange.fromString("^2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.CARET); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.4.12")); + + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testInvalidRanges() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and 
revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + expectThrows(NumberFormatException.class, () -> SemverRange.fromString("$1.2.3")); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java new file mode 100644 index 0000000000000..3cb168d42cda0 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class CaretTests extends OpenSearchTestCase { + + public void testMinorAndPatchVersionVariability() { + Caret caretExpr = new Caret(); + Version rangeVersion = Version.fromString("1.2.3"); + + // Compatible versions + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.3.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.9.9"))); + + // Incompatible versions + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java new file mode 100644 index 0000000000000..fb090865157ed --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class EqualTests extends OpenSearchTestCase { + + public void testEquality() { + Equal equalExpr = new Equal(); + Version rangeVersion = Version.fromString("1.2.3"); + assertTrue(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertFalse(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java new file mode 100644 index 0000000000000..8666611645c3a --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class TildeTests extends OpenSearchTestCase { + + public void testPatchVersionVariability() { + Tilde tildeExpr = new Tilde(); + Version rangeVersion = Version.fromString("1.2.3"); + + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.9"))); + + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.3.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java index 898ce7e4e913b..ab48cc2357e7f 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java @@ -129,6 +129,7 @@ private void configureSocket(ServerSocket socket) throws IOException { socket.setReuseAddress(config.tcpReuseAddress()); } + @SuppressWarnings("removal") protected static SocketChannel accept(ServerSocketChannel serverSocketChannel) throws IOException { try { assert serverSocketChannel.isBlocking() == false; diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 3df8e42fe4f14..530aa1d86afc7 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -388,6 +388,7 @@ private void configureSocket(Socket socket, boolean isConnectComplete) throws IO } } + @SuppressWarnings("removal") private static void connect(SocketChannel socketChannel, InetSocketAddress remoteAddress) throws IOException { try { AccessController.doPrivileged((PrivilegedExceptionAction<Boolean>) () -> socketChannel.connect(remoteAddress)); diff --git a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java index 4a200a5dfa9bd..969fa91b50538 100644 --- 
a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java +++ b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java @@ -65,6 +65,7 @@ protected Class<?> findClass(String name) throws ClassNotFoundException { /** * Return a new classloader across the parent and extended loaders. */ + @SuppressWarnings("removal") public static ExtendedPluginsClassLoader create(ClassLoader parent, List<ClassLoader> extendedLoaders) { return AccessController.doPrivileged( (PrivilegedAction<ExtendedPluginsClassLoader>) () -> new ExtendedPluginsClassLoader(parent, extendedLoaders) diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java index f41c49844997d..a2531f4a9156e 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java @@ -76,6 +76,7 @@ * @see <a href="http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html"> * http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html</a> */ +@SuppressWarnings("removal") public class SecureSM extends SecurityManager { private final String[] classesThatCanExit; diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java index fe239fea8129e..3c8e78a902fcb 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java @@ -18,6 +18,7 @@ import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory; import java.util.concurrent.ForkJoinWorkerThread; +@SuppressWarnings("removal") public class SecuredForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory { static AccessControlContext contextWithPermissions(Permission... 
perms) { Permissions permissions = new Permissions(); diff --git a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java index 026ffb080ee61..fd666c70cfebb 100644 --- a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java +++ b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java @@ -41,6 +41,7 @@ import junit.framework.TestCase; /** Simple tests for SecureSM */ +@SuppressWarnings("removal") public class SecureSMTests extends TestCase { static { // install a mock security policy: diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java index d57def9406b17..c861c21f89fc5 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -8,7 +8,11 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.io.Closeable; import java.io.IOException; +import java.util.function.Supplier; /** * Default implementation for {@link MetricsRegistry} @@ -34,6 +38,16 @@ public Counter createUpDownCounter(String name, String description, String unit) return metricsTelemetry.createUpDownCounter(name, description, unit); } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return metricsTelemetry.createHistogram(name, description, unit); + } + + @Override + public Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags) { + return metricsTelemetry.createGauge(name, description, unit, valueProvider, tags); + } + @Override public void close() throws IOException { metricsTelemetry.close(); diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java new file mode 100644 index 0000000000000..95ada626e21ee --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Histogram records the value for an existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Histogram { + + /** + * record value. + * @param value value to be added. + */ + void record(double value); + + /** + * record value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. 
+ */ + void record(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java index 61b3df089928b..3ab3dcf82c7a7 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -9,8 +9,10 @@ package org.opensearch.telemetry.metrics; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; import java.io.Closeable; +import java.util.function.Supplier; /** * MetricsRegistry helps in creating the metric instruments. @@ -36,4 +38,29 @@ public interface MetricsRegistry extends Closeable { * @return counter. */ Counter createUpDownCounter(String name, String description, String unit); + + /** + * Creates the histogram type of metric. The implementing framework will take care + * of the bucketing strategy. + * + * @param name name of the histogram. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return histogram. + */ + Histogram createHistogram(String name, String description, String unit); + + /** + * Creates the observable gauge type of metric, where the value provider will be called at a certain frequency + * to capture the value. + * + * @param name name of the observable gauge. + * @param description any description about the metric. + * @param unit unit of the metric. + * @param valueProvider value provider. + * @param tags attributes/dimensions of the metric. + * @return closeable to dispose/close the Gauge metric. + */ + Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags); + }
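A hedged sketch of how a caller might exercise this registry; the metricsRegistry and cache variables and the metric names are assumptions, Tags.create().addTag is assumed from the telemetry tags API, and only the createHistogram/createGauge signatures come from the interface above:

Histogram latency = metricsRegistry.createHistogram("search.query.latency", "Query latency", "ms");
latency.record(42.0); // untagged recording
latency.record(42.0, Tags.create().addTag("index", "logs")); // tagged variant
Closeable gauge = metricsRegistry.createGauge("cache.entries", "Entries in cache", "count", () -> (double) cache.count(), Tags.EMPTY);
gauge.close(); // may throw IOException; dispose the observable gauge when its owner shuts down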
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java new file mode 100644 index 0000000000000..20e72bccad899 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Histogram} + * {@opensearch.internal} + */ +@InternalApi +public class NoopHistogram implements Histogram { + + /** + * No-op Histogram instance + */ + public final static NoopHistogram INSTANCE = new NoopHistogram(); + + private NoopHistogram() {} + + @Override + public void record(double value) { + + } + + @Override + public void record(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java index 640c6842a8960..9a913d25e872d 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -10,9 +10,13 @@ import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; +import java.io.Closeable; import java.io.IOException; +import java.util.function.Supplier; /** *No-op {@link MetricsRegistry} @@ -38,6 +42,16 @@ public Counter createUpDownCounter(String name, String description, String unit) return NoopCounter.INSTANCE; } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + + @Override + public Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags) { + return () -> {}; + } + @Override + public void close() throws IOException { diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java index f9af611553aff..e5e62c795e5d0 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java @@ -31,4 +31,19 @@ public SpanContext(Span span) { Span getSpan() { return span; } + + /** + * Sets the error for the current span behind this context + * @param cause error + */ + public void setError(final Exception cause) { + span.setError(cause); + } + + /** + * Ends current span + */ + public void endSpan() { + span.endSpan(); + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java index cbbcfe7a85d57..6af7c440f8de9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -79,8 +79,8 @@ public SpanCreationContext attributes(Attributes attributes) { } /** - * Sets the parent for spann - * @param parent parent + * Sets the parent for span + * @param parent parent span context * @return spanCreationContext */ public SpanCreationContext parent(SpanContext parent) { diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java index 6171641db5f07..872f697ade09e 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java @@ -8,8 +8,12 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.test.OpenSearchTestCase; +import java.io.Closeable; +import java.util.function.Supplier; + import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -48,4 +52,31 @@ public void testUpDownCounter() { assertSame(mockCounter, counter); } + public void testHistogram() { + Histogram mockHistogram = mock(Histogram.class); + when(defaultMeterRegistry.createHistogram(any(String.class), any(String.class), any(String.class))).thenReturn(mockHistogram); + Histogram histogram = defaultMeterRegistry.createHistogram( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testHistogram", + "test up-down counter", + "ms" + ); + assertSame(mockHistogram, histogram); + } + + @SuppressWarnings("unchecked") + public void testGauge() { + Closeable mockCloseable = mock(Closeable.class); + when( + defaultMeterRegistry.createGauge(any(String.class), any(String.class), any(String.class), any(Supplier.class), any(Tags.class)) + ).thenReturn(mockCloseable); + Closeable closeable = defaultMeterRegistry.createGauge( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testObservableGauge", + "test observable gauge", + "ms", + () -> 1.0, + Tags.EMPTY + ); + assertSame(mockCloseable, closeable); + } + } diff --git a/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 deleted file mode 100644 index 908d071b34a2a..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..9b906dbda1656 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 @@ -0,0 +1 @@ +a6e5058ef9720623c517252d17162f845306ff3a \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 deleted file mode 100644 index b4b781f604910..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1be7098dccc079171464dca7e386bd8df623b031 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..382e20d3d31c1 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 @@ -0,0 +1 @@ +6833c8573452d583e4af650a7424d547606b2501 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 deleted file mode 100644 index ad91e748ebe94..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4ddbc5277670f2e56b1f5e44e83afa748bcb125 \ No newline at end of file diff --git 
a/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..d117479166d17 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 @@ -0,0 +1 @@ +f10183857607fde789490d33ea46372a2d2b0c72 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 deleted file mode 100644 index 9b30e7bf921b2..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e4f1923d73cd55f2b4c0d56ee4ed80419297354 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..35242eed9b212 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 @@ -0,0 +1 @@ +57a963c6258c49febc11390082d8503f71bb15a9 \ No newline at end of file diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java index 4c05f0058f2ed..2f4dada29780d 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java @@ -19,6 +19,7 @@ */ @InternalApi public interface XContentContraints { + final String DEFAULT_CODEPOINT_LIMIT_PROPERTY = "opensearch.xcontent.codepoint.max"; final String DEFAULT_MAX_STRING_LEN_PROPERTY = "opensearch.xcontent.string.length.max"; final String DEFAULT_MAX_NAME_LEN_PROPERTY = "opensearch.xcontent.name.length.max"; final String DEFAULT_MAX_DEPTH_PROPERTY = "opensearch.xcontent.depth.max"; @@ -32,4 +33,6 @@ public interface XContentContraints { final int DEFAULT_MAX_DEPTH = Integer.parseInt( System.getProperty(DEFAULT_MAX_DEPTH_PROPERTY, "1000" /* StreamReadConstraints.DEFAULT_MAX_DEPTH */) ); + + final int DEFAULT_CODEPOINT_LIMIT = Integer.parseInt(System.getProperty(DEFAULT_CODEPOINT_LIMIT_PROPERTY, "52428800" /* ~50 Mb */)); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java index 3f6a4b3aeead7..0e69c6c33b923 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java @@ -38,6 +38,7 @@ import com.fasterxml.jackson.core.StreamReadFeature; import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactoryBuilder; import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; @@ -55,6 +56,8 @@ import java.io.Reader; import java.util.Set; +import org.yaml.snakeyaml.LoaderOptions; + /** * A YAML based content implementation using Jackson. 
*/ @@ -67,7 +70,9 @@ public static XContentBuilder contentBuilder() throws IOException { public static final YamlXContent yamlXContent; static { - yamlFactory = new YAMLFactory(); + final LoaderOptions loaderOptions = new LoaderOptions(); + loaderOptions.setCodePointLimit(DEFAULT_CODEPOINT_LIMIT); + yamlFactory = new YAMLFactoryBuilder(new YAMLFactory()).loaderOptions(loaderOptions).build(); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); yamlFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); yamlFactory.setStreamReadConstraints( diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 0e431d8ea4277..81a2b0e290121 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -85,7 +85,8 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_STRING_LEN), /* YAML parser limitation */ XContentType.YAML, - () -> randomAlphaOfLengthBetween(1, 3140000) + /* use 75% of the limit, difficult to get the exact size of the content right */ + () -> randomRealisticUnicodeOfCodepointLengthBetween(1, (int) (YamlXContent.DEFAULT_CODEPOINT_LIMIT * 0.75)) ); private static final Map<XContentType, Supplier<String>> OFF_LIMIT_GENERATORS = Map.of( @@ -97,7 +98,7 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLength(SmileXContent.DEFAULT_MAX_STRING_LEN + 1), /* YAML parser limitation */ XContentType.YAML, - () -> randomRealisticUnicodeOfCodepointLength(3145730) + () -> randomRealisticUnicodeOfCodepointLength(YamlXContent.DEFAULT_CODEPOINT_LIMIT + 1) ); private static final Map<XContentType, Supplier<String>> FIELD_NAME_GENERATORS = Map.of( diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java index 71af708f2e1dc..648536f9136a8 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -36,10 +36,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.Operator; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,10 +48,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class QueryStringWithAnalyzersIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryStringWithAnalyzersIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public QueryStringWithAnalyzersIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryStringWithAnalyzersIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory 
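Stepping back from the YamlXContent hunk above: the new DEFAULT_CODEPOINT_LIMIT from XContentContraints is threaded into SnakeYAML through Jackson's YAMLFactoryBuilder. The same pattern in isolation, as a sketch; the 100 MB figure is an arbitrary example, not a recommendation:

LoaderOptions loaderOptions = new LoaderOptions();
loaderOptions.setCodePointLimit(104_857_600); // ~100 MB instead of the ~50 MB default
YAMLFactory yamlFactory = new YAMLFactoryBuilder(new YAMLFactory()).loaderOptions(loaderOptions).build();
// Deployments could instead raise the default through the system property read above:
// -Dopensearch.xcontent.codepoint.max=104857600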
@@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index 26f4acb2b1e6a..e55c1c69b2e40 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -44,7 +43,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -68,10 +67,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class HighlighterWithAnalyzersTests extends ParameterizedOpenSearchIntegTestCase { +public class HighlighterWithAnalyzersTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public HighlighterWithAnalyzersTests(Settings dynamicSettings) { - super(dynamicSettings); + public HighlighterWithAnalyzersTests(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -82,11 +81,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle new file mode 100644 index 0000000000000..98cdec83b9ad1 --- /dev/null +++ b/modules/cache-common/build.gradle @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'Module for caches which are optional and do not require additional security permission' + classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. 
+ systemProperty 'tests.security.manager', 'false' +} diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java new file mode 100644 index 0000000000000..568ac4d188c51 --- /dev/null +++ b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchType; +import org.opensearch.client.Client; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.time.ZoneId; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.greaterThan; + +public class TieredSpilloverCacheIT extends OpenSearchIntegTestCase { + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Arrays.asList(TieredSpilloverCachePlugin.class, MockDiskCachePlugin.class); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + .build(); + } + + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream() + .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.common" + ".tier.TieredSpilloverCachePlugin")) + ); + } + + public void testSanityChecksWithIndicesRequestCache() throws InterruptedException { + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("f", "type=date") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true).build()) + .get() + ); + indexRandom( + true, + client.prepareIndex("index").setSource("f", "2014-03-10T00:00:00.000Z"), + client.prepareIndex("index").setSource("f", "2014-05-13T00:00:00.000Z") + ); + ensureSearchable("index"); + + // This is not a random example: serialization with time zones writes shared strings + // which used to not work well with the query cache because of the handles stream output + // see #9500 + final SearchResponse r1 = client.prepareSearch("index") + .setSize(0) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + dateHistogram("histo").field("f") + .timeZone(ZoneId.of("+01:00")) + .minDocCount(0) + .dateHistogramInterval(DateHistogramInterval.MONTH) + ) + .get(); + assertSearchResponse(r1); + + // The cached is actually used + assertThat( + client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), + greaterThan(0L) + ); + } + + public static class MockDiskCachePlugin extends Plugin implements CachePlugin { + + public MockDiskCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of(MockDiskCache.MockDiskCacheFactory.NAME, new MockDiskCache.MockDiskCacheFactory(0, 1000)); + } + + @Override + public String getName() { + return "mock_disk_plugin"; + } + } +} diff --git 
a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java new file mode 100644 index 0000000000000..96ef027c17187 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cache.common.policy; + +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.Function; +import java.util.function.Predicate; + +/** + * A cache tier policy which accepts queries whose took time is greater than some threshold. + * The threshold should be set to approximately the time it takes to get a result from the cache tier. + * The policy accepts values of type V and decodes them into CachedQueryResult.PolicyValues, which has the data needed + * to decide whether to admit the value. + * @param <V> The type of data consumed by test(). + */ +public class TookTimePolicy<V> implements Predicate<V> { + /** + * The minimum took time to allow a query. Set to TimeValue.ZERO to let all data through. + */ + private final TimeValue threshold; + + /** + * Function which extracts the relevant PolicyValues from a serialized CachedQueryResult + */ + private final Function<V, CachedQueryResult.PolicyValues> cachedResultParser; + + /** + * Constructs a took time policy. + * @param threshold the threshold + * @param cachedResultParser the function providing policy values + */ + public TookTimePolicy(TimeValue threshold, Function<V, CachedQueryResult.PolicyValues> cachedResultParser) { + if (threshold.compareTo(TimeValue.ZERO) < 0) { + throw new IllegalArgumentException("Threshold for TookTimePolicy must be >= 0ms but was " + threshold.getStringRep()); + } + this.threshold = threshold; + this.cachedResultParser = cachedResultParser; + } + + /** + * Check whether to admit data. + * @param data the input argument + * @return whether to admit the data + */ + public boolean test(V data) { + long tookTimeNanos; + try { + tookTimeNanos = cachedResultParser.apply(data).getTookTimeNanos(); + } catch (Exception e) { + // If we can't read a CachedQueryResult.PolicyValues from the BytesReference, reject the data + return false; + } + + TimeValue tookTime = TimeValue.timeValueNanos(tookTimeNanos); + return tookTime.compareTo(threshold) >= 0; + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/package-info.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/package-info.java new file mode 100644 index 0000000000000..45cfb00662c98 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** A package for policies controlling what can enter caches. 
*/ +package org.opensearch.cache.common.policy; diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java new file mode 100644 index 0000000000000..00a8eec93acc9 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -0,0 +1,386 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.cache.common.policy.TookTimePolicy; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.common.util.iterable.Iterables; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; +import java.util.function.Predicate; + +/** + * This cache spills over evicted items from the heap tier to the disk tier. All new items are first cached on heap, + * and items evicted from the on-heap cache are moved to the disk-based cache. If the disk-based cache also fills up, + * items are eventually evicted from it as well, which results in a cache miss. + * + * @param <K> Type of key + * @param <V> Type of value + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieredSpilloverCache<K, V> implements ICache<K, V> { + + private final ICache<K, V> diskCache; + private final ICache<K, V> onHeapCache; + private final RemovalListener<K, V> removalListener; + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); + ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock());
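Before the class body continues, a hedged sketch of how the TookTimePolicy introduced earlier in this diff gates disk-tier admission; the BytesReference value type, the 10ms threshold, and the parsePolicyValues helper are illustrative assumptions:

TookTimePolicy<BytesReference> policy = new TookTimePolicy<>(
    TimeValue.timeValueMillis(10), // admit only results that took at least 10ms to compute
    bytes -> parsePolicyValues(bytes) // hypothetical parser yielding CachedQueryResult.PolicyValues
);
boolean admitToDisk = policy.test(serializedResult); // false means the evicted entry is dropped, not spilled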
+ /** + * Maintains caching tiers in ascending order of cache latency. + */ + private final List<ICache<K, V>> cacheList; + private final List<Predicate<V>> policies; + + TieredSpilloverCache(Builder<K, V> builder) { + Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null"); + Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null"); + this.removalListener = Objects.requireNonNull(builder.removalListener, "Removal listener can't be null"); + + this.onHeapCache = builder.onHeapCacheFactory.create( + new CacheConfig.Builder<K, V>().setRemovalListener(new RemovalListener<K, V>() { + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + try (ReleasableLock ignore = writeLock.acquire()) { + if (evaluatePolicies(notification.getValue())) { + diskCache.put(notification.getKey(), notification.getValue()); + } + } + } + }) + .setKeyType(builder.cacheConfig.getKeyType()) + .setValueType(builder.cacheConfig.getValueType()) + .setSettings(builder.cacheConfig.getSettings()) + .setWeigher(builder.cacheConfig.getWeigher()) + .setMaxSizeInBytes(builder.cacheConfig.getMaxSizeInBytes()) + .setExpireAfterAccess(builder.cacheConfig.getExpireAfterAccess()) + .build(), + builder.cacheType, + builder.cacheFactories + + ); + this.diskCache = builder.diskCacheFactory.create(builder.cacheConfig, builder.cacheType, builder.cacheFactories); + this.cacheList = Arrays.asList(onHeapCache, diskCache); + + this.policies = builder.policies; // Will never be null; builder initializes it to an empty list + } + + // Package private for testing + ICache<K, V> getOnHeapCache() { + return onHeapCache; + } + + // Package private for testing + ICache<K, V> getDiskCache() { + return diskCache; + } + + @Override + public V get(K key) { + return getValueFromTieredCache().apply(key); + } + + @Override + public void put(K key, V value) { + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + } + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + + V cacheValue = getValueFromTieredCache().apply(key); + if (cacheValue == null) { + // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. + // This is needed as there can be many requests for the same key at the same time and we only want to load + // the value once. + V value = null; + try (ReleasableLock ignore = writeLock.acquire()) { + value = onHeapCache.computeIfAbsent(key, loader); + } + return value; + } + return cacheValue; + } + + @Override + public void invalidate(K key) { + // We are trying to invalidate the key from all caches though it would be present in only one of them. + // Doing this as we don't know where it is located. We could do a get from both and check, but that would + // also trigger a hit/miss listener event, so we ignore it for now. + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidate(key); + } + } + } + + @Override + public void invalidateAll() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidateAll(); + } + } + } + + /** + * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache.
+ * @return An iterable over (onHeap + disk) keys + */ + @SuppressWarnings("unchecked") + @Override + public Iterable<K> keys() { + return Iterables.concat(onHeapCache.keys(), diskCache.keys()); + } + + @Override + public long count() { + long count = 0; + for (ICache<K, V> cache : cacheList) { + count += cache.count(); + } + return count; + } + + @Override + public void refresh() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.refresh(); + } + } + } + + @Override + public void close() throws IOException { + for (ICache<K, V> cache : cacheList) { + cache.close(); + } + } + + private Function<K, V> getValueFromTieredCache() { + return key -> { + try (ReleasableLock ignore = readLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + V value = cache.get(key); + if (value != null) { + // update hit stats + return value; + } else { + // update miss stats + } + } + } + return null; + }; + } + + boolean evaluatePolicies(V value) { + for (Predicate<V> policy : policies) { + if (!policy.test(value)) { + return false; + } + } + return true; + } + + /** + * Factory to create TieredSpilloverCache objects. + */ + public static class TieredSpilloverCacheFactory implements ICache.Factory { + + /** + * Defines cache name + */ + public static final String TIERED_SPILLOVER_CACHE_NAME = "tiered_spillover"; + + /** + * Default constructor + */ + public TieredSpilloverCacheFactory() {} + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Settings settings = config.getSettings(); + Setting<String> onHeapSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String onHeapCacheStoreName = onHeapSetting.get(settings); + if (!cacheFactories.containsKey(onHeapCacheStoreName)) { + throw new IllegalArgumentException( + "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory onHeapCacheFactory = cacheFactories.get(onHeapCacheStoreName); + + Setting<String> onDiskSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String diskCacheStoreName = onDiskSetting.get(settings); + if (!cacheFactories.containsKey(diskCacheStoreName)) { + throw new IllegalArgumentException( + "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); + + TimeValue diskPolicyThreshold = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD + .getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + .get(settings); + Function<V, CachedQueryResult.PolicyValues> cachedResultParser = Objects.requireNonNull( + config.getCachedResultParser(), + "Cached result parser fn can't be null" + ); + + return new Builder<K, V>().setDiskCacheFactory(diskCacheFactory) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setRemovalListener(config.getRemovalListener()) + .setCacheConfig(config) + .setCacheType(cacheType) + .addPolicy(new TookTimePolicy<V>(diskPolicyThreshold, cachedResultParser)) + .build(); + } + + @Override + public String getCacheName() { + return TIERED_SPILLOVER_CACHE_NAME; + } + } + + /** + * Builder object for tiered spillover cache. 
+     * @param <K> Type of key
+     * @param <V> Type of value
+     */
+    public static class Builder<K, V> {
+        private ICache.Factory onHeapCacheFactory;
+        private ICache.Factory diskCacheFactory;
+        private RemovalListener<K, V> removalListener;
+        private CacheConfig<K, V> cacheConfig;
+        private CacheType cacheType;
+        private Map<String, ICache.Factory> cacheFactories;
+        private final ArrayList<Predicate<V>> policies = new ArrayList<>();
+
+        /**
+         * Default constructor
+         */
+        public Builder() {}
+
+        /**
+         * Set onHeap cache factory
+         * @param onHeapCacheFactory Factory for onHeap cache.
+         * @return builder
+         */
+        public Builder<K, V> setOnHeapCacheFactory(ICache.Factory onHeapCacheFactory) {
+            this.onHeapCacheFactory = onHeapCacheFactory;
+            return this;
+        }
+
+        /**
+         * Set disk cache factory
+         * @param diskCacheFactory Factory for disk cache.
+         * @return builder
+         */
+        public Builder<K, V> setDiskCacheFactory(ICache.Factory diskCacheFactory) {
+            this.diskCacheFactory = diskCacheFactory;
+            return this;
+        }
+
+        /**
+         * Set removal listener for tiered cache.
+         * @param removalListener Removal listener
+         * @return builder
+         */
+        public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) {
+            this.removalListener = removalListener;
+            return this;
+        }
+
+        /**
+         * Set cache config.
+         * @param cacheConfig cache config.
+         * @return builder
+         */
+        public Builder<K, V> setCacheConfig(CacheConfig<K, V> cacheConfig) {
+            this.cacheConfig = cacheConfig;
+            return this;
+        }
+
+        /**
+         * Set cache type.
+         * @param cacheType Cache type
+         * @return builder
+         */
+        public Builder<K, V> setCacheType(CacheType cacheType) {
+            this.cacheType = cacheType;
+            return this;
+        }
+
+        /**
+         * Set cache factories
+         * @param cacheFactories cache factories
+         * @return builder
+         */
+        public Builder<K, V> setCacheFactories(Map<String, ICache.Factory> cacheFactories) {
+            this.cacheFactories = cacheFactories;
+            return this;
+        }
+
+        /**
+         * Add a cache policy used to limit access to this cache's disk tier.
+         * @param policy the policy
+         * @return builder
+         */
+        public Builder<K, V> addPolicy(Predicate<V> policy) {
+            this.policies.add(policy);
+            return this;
+        }
+
+        /**
+         * Add multiple policies used to limit access to this cache's disk tier.
+         * @param policies the policies
+         * @return builder
+         */
+        public Builder<K, V> addPolicies(List<Predicate<V>> policies) {
+            this.policies.addAll(policies);
+            return this;
+        }
+
+        /**
+         * Build tiered spillover cache.
+         * @return TieredSpilloverCache
+         */
+        public TieredSpilloverCache<K, V> build() {
+            return new TieredSpilloverCache<>(this);
+        }
+    }
+}
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java
new file mode 100644
index 0000000000000..0cc8a711faaf5
--- /dev/null
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Plugin for TieredSpilloverCache. + */ +public class TieredSpilloverCachePlugin extends Plugin implements CachePlugin { + + /** + * Plugin name + */ + public static final String TIERED_CACHE_SPILLOVER_PLUGIN_NAME = "tieredSpilloverCachePlugin"; + + /** + * Default constructor + */ + public TieredSpilloverCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME, + new TieredSpilloverCache.TieredSpilloverCacheFactory() + ); + } + + @Override + public List<Setting<?>> getSettings() { + List<Setting<?>> settingList = new ArrayList<>(); + for (CacheType cacheType : CacheType.values()) { + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + settingList.add( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ) + ); + } + return settingList; + } + + @Override + public String getName() { + return TIERED_CACHE_SPILLOVER_PLUGIN_NAME; + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java new file mode 100644 index 0000000000000..684307960b8a5 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings related to TieredSpilloverCache. + */ +public class TieredSpilloverCacheSettings { + + /** + * Setting which defines the onHeap cache store to be used in TieredSpilloverCache. + * + * Pattern: {cache_type}.tiered_spillover.onheap.store.name + * Example: indices.request.cache.tiered_spillover.onheap.store.name + */ + public static final Setting.AffixSetting<String> TIERED_SPILLOVER_ONHEAP_STORE_NAME = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".onheap.store.name", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Setting which defines the disk cache store to be used in TieredSpilloverCache. 
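+     *
+     * Pattern: {cache_type}.tiered_spillover.disk.store.name
+     * Example: indices.request.cache.tiered_spillover.disk.store.name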
+ */ + public static final Setting.AffixSetting<String> TIERED_SPILLOVER_DISK_STORE_NAME = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.name", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Setting defining the minimum took time for a query to be allowed into the disk cache. + */ + public static final Setting.AffixSetting<TimeValue> TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting( + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.policies.took_time.threshold", + (key) -> Setting.timeSetting( + key, + new TimeValue(10, TimeUnit.MILLISECONDS), // Default value for this setting + TimeValue.ZERO, // Minimum value for this setting + NodeScope + ) + ); + // 10 ms was chosen as a safe value based on proof of concept, where we saw disk latencies in this range. + // Will be tuned further with future benchmarks. + + /** + * Default constructor + */ + TieredSpilloverCacheSettings() {} +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java similarity index 68% rename from server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java rename to modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java index 7a4e0fa7201fd..fa2de3c14b5dc 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Package related to tiered cache enums */ -package org.opensearch.common.cache.store.enums; +/** Package related to cache tiers **/ +package org.opensearch.cache.common.tier; diff --git a/modules/cache-common/src/main/plugin-metadata/plugin-security.policy b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..12fe9f2ddb60b --- /dev/null +++ b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + permission java.lang.RuntimePermission "createClassLoader"; +}; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java new file mode 100644 index 0000000000000..237c9c7b79db4 --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.common.policy; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.opensearch.common.Randomness; +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Random; +import java.util.function.Function; + +public class TookTimePolicyTests extends OpenSearchTestCase { + private final Function<BytesReference, CachedQueryResult.PolicyValues> transformationFunction = (data) -> { + try { + return CachedQueryResult.getPolicyValues(data); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + + private TookTimePolicy<BytesReference> getTookTimePolicy(TimeValue threshold) { + return new TookTimePolicy<>(threshold, transformationFunction); + } + + public void testTookTimePolicy() throws Exception { + double threshMillis = 10; + long shortMillis = (long) (0.9 * threshMillis); + long longMillis = (long) (1.5 * threshMillis); + TookTimePolicy<BytesReference> tookTimePolicy = getTookTimePolicy(new TimeValue((long) threshMillis)); + BytesReference shortTime = getValidPolicyInput(shortMillis * 1000000); + BytesReference longTime = getValidPolicyInput(longMillis * 1000000); + + boolean shortResult = tookTimePolicy.test(shortTime); + assertFalse(shortResult); + boolean longResult = tookTimePolicy.test(longTime); + assertTrue(longResult); + + TookTimePolicy<BytesReference> disabledPolicy = getTookTimePolicy(TimeValue.ZERO); + shortResult = disabledPolicy.test(shortTime); + assertTrue(shortResult); + longResult = disabledPolicy.test(longTime); + assertTrue(longResult); + } + + public void testNegativeOneInput() throws Exception { + // PolicyValues with -1 took time can be passed to this policy if we shouldn't accept it for whatever reason + TookTimePolicy<BytesReference> tookTimePolicy = getTookTimePolicy(TimeValue.ZERO); + BytesReference minusOne = getValidPolicyInput(-1L); + assertFalse(tookTimePolicy.test(minusOne)); + } + + public void testInvalidThreshold() throws Exception { + assertThrows(IllegalArgumentException.class, () -> getTookTimePolicy(TimeValue.MINUS_ONE)); + } + + private BytesReference getValidPolicyInput(Long tookTimeNanos) throws IOException { + // When it's used in the cache, the policy will receive BytesReferences which come from + // serializing a CachedQueryResult. + CachedQueryResult cachedQueryResult = new CachedQueryResult(getQSR(), tookTimeNanos); + BytesStreamOutput out = new BytesStreamOutput(); + cachedQueryResult.writeToNoId(out); + return out.bytes(); + } + + private QuerySearchResult getQSR() { + // We can't mock the QSR with mockito because the class is final. 
Construct a real one + QuerySearchResult mockQSR = new QuerySearchResult(); + + // duplicated from DfsQueryPhaseTests.java + mockQSR.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + return mockQSR; + } + + private void writeRandomBytes(StreamOutput out, int numBytes) throws IOException { + Random rand = Randomness.get(); + byte[] bytes = new byte[numBytes]; + rand.nextBytes(bytes); + out.writeBytes(bytes); + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java new file mode 100644 index 0000000000000..d8a6eb480a5a5 --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class MockDiskCache<K, V> implements ICache<K, V> { + + Map<K, V> cache; + int maxSize; + long delay; + + private final RemovalListener<K, V> removalListener; + + public MockDiskCache(int maxSize, long delay, RemovalListener<K, V> removalListener) { + this.maxSize = maxSize; + this.delay = delay; + this.removalListener = removalListener; + this.cache = new ConcurrentHashMap<K, V>(); + } + + @Override + public V get(K key) { + V value = cache.get(key); + return value; + } + + @Override + public void put(K key, V value) { + if (this.cache.size() >= maxSize) { // For simplification + this.removalListener.onRemoval(new RemovalNotification<>(key, value, RemovalReason.EVICTED)); + } + try { + Thread.sleep(delay); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + this.cache.put(key, value); + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) { + V value = cache.computeIfAbsent(key, key1 -> { + try { + return loader.load(key); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + return value; + } + + @Override + public void invalidate(K key) { + this.cache.remove(key); + } + + @Override + public void invalidateAll() { + this.cache.clear(); + } + + @Override + public Iterable<K> keys() { + return this.cache.keySet(); + } + + @Override + public long count() { + return this.cache.size(); + } + + @Override + public void refresh() {} + + @Override + public void close() { + + } + + public static class MockDiskCacheFactory implements Factory { + + public static final String NAME = "mockDiskCache"; + final long delay; + final int maxSize; + + public MockDiskCacheFactory(long delay, int maxSize) { + this.delay = delay; + this.maxSize = maxSize; + } + + @Override + @SuppressWarnings({ "unchecked" }) + 
public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + return new Builder<K, V>().setKeySerializer((Serializer<K, byte[]>) config.getKeySerializer()) + .setValueSerializer((Serializer<V, byte[]>) config.getValueSerializer()) + .setMaxSize(maxSize) + .setDeliberateDelay(delay) + .setRemovalListener(config.getRemovalListener()) + .build(); + } + + @Override + public String getCacheName() { + return NAME; + } + } + + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + int maxSize; + long delay; + Serializer<K, byte[]> keySerializer; + Serializer<V, byte[]> valueSerializer; + + @Override + public ICache<K, V> build() { + return new MockDiskCache<K, V>(this.maxSize, this.delay, this.getRemovalListener()); + } + + public Builder<K, V> setMaxSize(int maxSize) { + this.maxSize = maxSize; + return this; + } + + public Builder<K, V> setDeliberateDelay(long millis) { + this.delay = millis; + return this; + } + + public Builder<K, V> setKeySerializer(Serializer<K, byte[]> keySerializer) { + this.keySerializer = keySerializer; + return this; + } + + public Builder<K, V> setValueSerializer(Serializer<V, byte[]> valueSerializer) { + this.valueSerializer = valueSerializer; + return this; + } + + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java new file mode 100644 index 0000000000000..1172a48e97c6a --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class TieredSpilloverCachePluginTests extends OpenSearchTestCase { + + public void testGetCacheFactoryMap() { + TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(); + Map<String, ICache.Factory> map = tieredSpilloverCachePlugin.getCacheFactoryMap(); + assertNotNull(map.get(TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME)); + assertEquals(TieredSpilloverCachePlugin.TIERED_CACHE_SPILLOVER_PLUGIN_NAME, tieredSpilloverCachePlugin.getName()); + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java new file mode 100644 index 0000000000000..b132952834f06 --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -0,0 +1,1143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Predicate; + +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; + +public class TieredSpilloverCacheTests extends OpenSearchTestCase { + + public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + randomIntBetween(1, 4), + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + int numOfItems1 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + List<String> keys = new ArrayList<>(); + // Put values in cache. + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + keys.add(key); + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertEquals(0, removalListener.evictionsMetric.count()); + + // Try to hit cache again with some randomization. + int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + int cacheHit = 0; + int cacheMiss = 0; + for (int iter = 0; iter < numOfItems2; iter++) { + if (randomBoolean()) { + // Hit cache with stored key + cacheHit++; + int index = randomIntBetween(0, keys.size() - 1); + tieredSpilloverCache.computeIfAbsent(keys.get(index), getLoadAwareCacheLoader()); + } else { + // Hit cache with randomized key which is expected to miss cache always. 
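+                // (UUID keys make collisions with stored keys effectively impossible, so each such call loads a fresh value.)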
+ tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), getLoadAwareCacheLoader()); + cacheMiss++; + } + } + assertEquals(0, removalListener.evictionsMetric.count()); + } + + public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the desired settings needed to create a TieredSpilloverCache object with INDICES_REQUEST_CACHE cacheType. + Settings settings = Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") + .build(); + + ICache<String, String> tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken + // 20_000_000 ns = 20 ms to compute + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockDiskCache.MockDiskCacheFactory.NAME, + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + ) + ); + + TieredSpilloverCache<String, String> tieredSpilloverCache = (TieredSpilloverCache<String, String>) tieredSpilloverICache; + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + // Verify on heap cache size. + assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); + // Verify disk cache size. + assertEquals(numOfItems1 - onHeapCacheSize, tieredSpilloverCache.getDiskCache().count()); + } + + public void testWithFactoryCreationWithOnHeapCacheNotPresent() { + int onHeapCacheSize = randomIntBetween(10, 30); + int keyValueSize = 50; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the settings without onHeap cache settings. 
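+        // Only the disk store name is configured, so the factory create() below should throw IllegalArgumentException.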
+        Settings settings = Settings.builder()
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                MockDiskCache.MockDiskCacheFactory.NAME
+            )
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .put(
+                CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+            )
+            .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+            .build();
+
+        IllegalArgumentException ex = assertThrows(
+            IllegalArgumentException.class,
+            () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+                new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                    .setKeyType(String.class)
+                    .setWeigher((k, v) -> keyValueSize)
+                    .setRemovalListener(removalListener)
+                    .setSettings(settings)
+                    .build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                Map.of(
+                    OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                    new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                    MockDiskCache.MockDiskCacheFactory.NAME,
+                    new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300))
+                )
+            )
+        );
+        assertEquals(
+            ex.getMessage(),
+            "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE
+        );
+    }
+
+    public void testWithFactoryCreationWithDiskCacheNotPresent() {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int keyValueSize = 50;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        // Set the settings without disk cache settings.
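+        // Only the onHeap store name is configured, so the factory create() below should throw IllegalArgumentException.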
+        Settings settings = Settings.builder()
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        IllegalArgumentException ex = assertThrows(
+            IllegalArgumentException.class,
+            () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+                new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                    .setKeyType(String.class)
+                    .setWeigher((k, v) -> keyValueSize)
+                    .setRemovalListener(removalListener)
+                    .setSettings(settings)
+                    .build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                Map.of(
+                    OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                    new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                    MockDiskCache.MockDiskCacheFactory.NAME,
+                    new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300))
+                )
+            )
+        );
+        assertEquals(
+            ex.getMessage(),
+            "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE
+        );
+    }
+
+    public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int totalSize = onHeapCacheSize + diskCacheSize;
+        int keyValueSize = 50;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setKeyType(String.class)
+            .setWeigher((k, v) -> keyValueSize)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(
+                        OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                            .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                            .getKey(),
+                        onHeapCacheSize * keyValueSize + "b"
+                    )
+                    .build()
+            )
+            .build();
+
+        ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize);
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>()
+            .setOnHeapCacheFactory(onHeapCacheFactory)
+            .setDiskCacheFactory(mockDiskCacheFactory)
+            .setCacheConfig(cacheConfig)
+            .setRemovalListener(removalListener)
+            .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+            .build();
+
+        // Put values in cache more than its size and cause evictions from onHeap.
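+        // Evicted entries flow through the removal listener into the disk tier; with no policies configured, everything spills.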
+ int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List<String> onHeapKeys = new ArrayList<>(); + List<String> diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + + tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); + tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add); + + assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size()); + assertEquals(tieredSpilloverCache.getDiskCache().count(), diskTierKeys.size()); + + // Try to hit cache again with some randomization. + int numOfItems2 = randomIntBetween(50, 200); + int onHeapCacheHit = 0; + int diskCacheHit = 0; + int cacheMiss = 0; + for (int iter = 0; iter < numOfItems2; iter++) { + if (randomBoolean()) { // Hit cache with key stored in onHeap cache. + onHeapCacheHit++; + int index = randomIntBetween(0, onHeapKeys.size() - 1); + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(onHeapKeys.get(index), loadAwareCacheLoader); + assertFalse(loadAwareCacheLoader.isLoaded()); + } else { // Hit cache with key stored in disk cache. + diskCacheHit++; + int index = randomIntBetween(0, diskTierKeys.size() - 1); + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(diskTierKeys.get(index), loadAwareCacheLoader); + assertFalse(loadAwareCacheLoader.isLoaded()); + } + } + for (int iter = 0; iter < randomIntBetween(50, 200); iter++) { + // Hit cache with randomized key which is expected to miss cache always. 
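+            // Each miss is loaded into the onHeap tier, which may in turn evict an older entry to the disk tier.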
+ LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + cacheMiss++; + } + } + + public void testComputeIfAbsentWithEvictionsFromTieredCache() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + int numOfItems = randomIntBetween(totalSize + 1, totalSize * 3); + for (int iter = 0; iter < numOfItems; iter++) { + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + } + int evictions = numOfItems - (totalSize); + assertEquals(evictions, removalListener.evictionsMetric.count()); + } + + public void testGetAndCount() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List<String> onHeapKeys = new ArrayList<>(); + List<String> diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + if (iter > (onHeapCacheSize - 1)) { + // All these are bound to go to disk based cache. 
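+                // get() consults both tiers in order, so the lookups below succeed regardless of which tier a key landed in.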
+ diskTierKeys.add(key); + } else { + onHeapKeys.add(key); + } + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } + + for (int iter = 0; iter < numOfItems1; iter++) { + if (randomBoolean()) { + if (randomBoolean()) { + int index = randomIntBetween(0, onHeapKeys.size() - 1); + assertNotNull(tieredSpilloverCache.get(onHeapKeys.get(index))); + } else { + int index = randomIntBetween(0, diskTierKeys.size() - 1); + assertNotNull(tieredSpilloverCache.get(diskTierKeys.get(index))); + } + } else { + assertNull(tieredSpilloverCache.get(UUID.randomUUID().toString())); + } + } + assertEquals(numOfItems1, tieredSpilloverCache.count()); + } + + public void testPut() { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + String key = UUID.randomUUID().toString(); + String value = UUID.randomUUID().toString(); + tieredSpilloverCache.put(key, value); + assertEquals(1, tieredSpilloverCache.count()); + } + + public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { + int onHeapCacheSize = randomIntBetween(200, 400); + int diskCacheSize = randomIntBetween(450, 800); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + (onHeapCacheSize * keyValueSize) + "b" + ) + .build(), + 0 + ); + + for (int i = 0; i < onHeapCacheSize; i++) { + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), new LoadAwareCacheLoader<>() { + @Override + public boolean isLoaded() { + return false; + } + + @Override + public String load(String key) { + return UUID.randomUUID().toString(); + } + }); + } + + assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); + assertEquals(0, tieredSpilloverCache.getDiskCache().count()); + + // Again try to put OnHeap cache capacity amount of new items. + List<String> newKeyList = new ArrayList<>(); + for (int i = 0; i < onHeapCacheSize; i++) { + newKeyList.add(UUID.randomUUID().toString()); + } + + for (int i = 0; i < newKeyList.size(); i++) { + tieredSpilloverCache.computeIfAbsent(newKeyList.get(i), new LoadAwareCacheLoader<>() { + @Override + public boolean isLoaded() { + return false; + } + + @Override + public String load(String key) { + return UUID.randomUUID().toString(); + } + }); + } + + // Verify that new items are part of onHeap cache. 
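+        // The first batch should by now have been evicted to the disk tier to make room for the new keys.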
+        List<String> actualOnHeapCacheKeys = new ArrayList<>();
+        tieredSpilloverCache.getOnHeapCache().keys().forEach(actualOnHeapCacheKeys::add);
+
+        assertEquals(newKeyList.size(), actualOnHeapCacheKeys.size());
+        for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) {
+            assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i)));
+        }
+        assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count());
+        assertEquals(onHeapCacheSize, tieredSpilloverCache.getDiskCache().count());
+    }
+
+    public void testInvalidate() {
+        int onHeapCacheSize = 1;
+        int diskCacheSize = 10;
+        int keyValueSize = 20;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        String key = UUID.randomUUID().toString();
+        String value = UUID.randomUUID().toString();
+        // First try to invalidate without the key present in cache.
+        tieredSpilloverCache.invalidate(key);
+
+        // Now try to invalidate with the key present in onHeap cache.
+        tieredSpilloverCache.put(key, value);
+        tieredSpilloverCache.invalidate(key);
+        assertEquals(0, tieredSpilloverCache.count());
+
+        tieredSpilloverCache.put(key, value);
+        // Put another key/value so that one of the items is evicted to the disk cache.
+        String key2 = UUID.randomUUID().toString();
+        tieredSpilloverCache.put(key2, UUID.randomUUID().toString());
+        assertEquals(2, tieredSpilloverCache.count());
+        // Again invalidate the older key.
+        tieredSpilloverCache.invalidate(key);
+        assertEquals(1, tieredSpilloverCache.count());
+    }
+
+    public void testCacheKeys() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        // During first round add onHeapCacheSize entries. Will go to onHeap cache initially.
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            String key = UUID.randomUUID().toString();
+            diskTierKeys.add(key);
+            tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader());
+        }
+        // In another round, add another onHeapCacheSize entries. These will go to onHeap and the earlier ones will be
+        // evicted to onDisk cache.
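+        // (Spillover happens one entry at a time: each insert beyond the heap tier's capacity evicts an older entry to disk.)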
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            String key = UUID.randomUUID().toString();
+            onHeapKeys.add(key);
+            tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader());
+        }
+
+        List<String> actualOnHeapKeys = new ArrayList<>();
+        List<String> actualOnDiskKeys = new ArrayList<>();
+        Iterable<String> onHeapiterable = tieredSpilloverCache.getOnHeapCache().keys();
+        Iterable<String> onDiskiterable = tieredSpilloverCache.getDiskCache().keys();
+        onHeapiterable.iterator().forEachRemaining(actualOnHeapKeys::add);
+        onDiskiterable.iterator().forEachRemaining(actualOnDiskKeys::add);
+        for (String onHeapKey : onHeapKeys) {
+            assertTrue(actualOnHeapKeys.contains(onHeapKey));
+        }
+        for (String onDiskKey : diskTierKeys) {
+            assertTrue(actualOnDiskKeys.contains(onDiskKey));
+        }
+
+        // Testing keys() which returns all keys.
+        List<String> actualMergedKeys = new ArrayList<>();
+        List<String> expectedMergedKeys = new ArrayList<>();
+        expectedMergedKeys.addAll(onHeapKeys);
+        expectedMergedKeys.addAll(diskTierKeys);
+
+        Iterable<String> mergedIterable = tieredSpilloverCache.keys();
+        mergedIterable.iterator().forEachRemaining(actualMergedKeys::add);
+
+        assertEquals(expectedMergedKeys.size(), actualMergedKeys.size());
+        for (String key : expectedMergedKeys) {
+            assertTrue(actualMergedKeys.contains(key));
+        }
+    }
+
+    public void testRefresh() {
+        int diskCacheSize = randomIntBetween(60, 100);
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            50,
+            diskCacheSize,
+            removalListener,
+            Settings.EMPTY,
+            0
+        );
+        tieredSpilloverCache.refresh();
+    }
+
+    public void testInvalidateAll() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int keyValueSize = 50;
+        int totalSize = onHeapCacheSize + diskCacheSize;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        // Put values in cache more than its size and cause evictions from onHeap.
+        int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize);
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        for (int iter = 0; iter < numOfItems1; iter++) {
+            String key = UUID.randomUUID().toString();
+            if (iter > (onHeapCacheSize - 1)) {
+                // All these are bound to go to disk based cache.
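+                // (Tier placement doesn't matter below: invalidateAll() must clear both tiers for count() to reach zero.)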
+                diskTierKeys.add(key);
+            } else {
+                onHeapKeys.add(key);
+            }
+            LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader();
+            tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader);
+        }
+        assertEquals(numOfItems1, tieredSpilloverCache.count());
+        tieredSpilloverCache.invalidateAll();
+        assertEquals(0, tieredSpilloverCache.count());
+    }
+
+    public void testComputeIfAbsentConcurrently() throws Exception {
+        int onHeapCacheSize = randomIntBetween(100, 300);
+        int diskCacheSize = randomIntBetween(200, 400);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        Settings settings = Settings.builder()
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            settings,
+            0
+        );
+
+        int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1);
+        String key = UUID.randomUUID().toString();
+        String value = UUID.randomUUID().toString();
+
+        Thread[] threads = new Thread[numberOfSameKeys];
+        Phaser phaser = new Phaser(numberOfSameKeys + 1);
+        CountDownLatch countDownLatch = new CountDownLatch(numberOfSameKeys); // To wait for all threads to finish.
+
+        List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>();
+
+        for (int i = 0; i < numberOfSameKeys; i++) {
+            threads[i] = new Thread(() -> {
+                try {
+                    LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() {
+                        boolean isLoaded = false;
+
+                        @Override
+                        public boolean isLoaded() {
+                            return isLoaded;
+                        }
+
+                        @Override
+                        public String load(String key) {
+                            isLoaded = true;
+                            return value;
+                        }
+                    };
+                    loadAwareCacheLoaderList.add(loadAwareCacheLoader);
+                    phaser.arriveAndAwaitAdvance();
+                    tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+                countDownLatch.countDown();
+            });
+            threads[i].start();
+        }
+        phaser.arriveAndAwaitAdvance();
+        countDownLatch.await(); // Wait for all threads to finish.
+        int numberOfTimesKeyLoaded = 0;
+        assertEquals(numberOfSameKeys, loadAwareCacheLoaderList.size());
+        for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) {
+            LoadAwareCacheLoader<String, String> loader = loadAwareCacheLoaderList.get(i);
+            if (loader.isLoaded()) {
+                numberOfTimesKeyLoaded++;
+            }
+        }
+        assertEquals(1, numberOfTimesKeyLoaded); // It should be loaded only once.
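+        // The tiered cache holds its write lock around onHeapCache.computeIfAbsent, so concurrent callers for the same key share a single load.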
+    }
+
+    public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exception {
+        int diskCacheSize = randomIntBetween(450, 800);
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        ICache.Factory diskCacheFactory = new MockDiskCache.MockDiskCacheFactory(500, diskCacheSize);
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setKeyType(String.class)
+            .setWeigher((k, v) -> 150)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(
+                        OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                            .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                            .getKey(),
+                        200 + "b"
+                    )
+                    .build()
+            )
+            .build();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>()
+            .setOnHeapCacheFactory(onHeapCacheFactory)
+            .setDiskCacheFactory(diskCacheFactory)
+            .setRemovalListener(removalListener)
+            .setCacheConfig(cacheConfig)
+            .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+            .build();
+
+        String keyToBeEvicted = "key1";
+        String secondKey = "key2";
+
+        // Put first key on tiered cache. Will go into onHeap cache.
+        tieredSpilloverCache.computeIfAbsent(keyToBeEvicted, new LoadAwareCacheLoader<>() {
+            @Override
+            public boolean isLoaded() {
+                return false;
+            }
+
+            @Override
+            public String load(String key) {
+                return UUID.randomUUID().toString();
+            }
+        });
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountDownLatch countDownLatch1 = new CountDownLatch(1);
+        // Put second key on tiered cache. Will cause eviction of first key from onHeap cache and should go into
+        // disk cache.
+        LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader();
+        Thread thread = new Thread(() -> {
+            try {
+                tieredSpilloverCache.computeIfAbsent(secondKey, loadAwareCacheLoader);
+                countDownLatch1.countDown();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        thread.start();
+        // We wait for the new key to be loaded, after which the eviction flow is guaranteed to occur.
+        assertBusy(() -> { assertTrue(loadAwareCacheLoader.isLoaded()); }, 100, TimeUnit.MILLISECONDS);
+        ICache<String, String> onDiskCache = tieredSpilloverCache.getDiskCache();
+
+        // Now, on a different thread, try to get the key evicted above from the tiered cache. We expect this to
+        // return a non-null value, as the key should be present in the disk cache.
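+        // The eviction and the disk-tier put happen under the cache's write lock, so the get below can never observe the key missing from both tiers.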
+ AtomicReference<String> actualValue = new AtomicReference<>(); + Thread thread1 = new Thread(() -> { + try { + actualValue.set(tieredSpilloverCache.get(keyToBeEvicted)); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + thread1.start(); + countDownLatch.await(); + assertNotNull(actualValue.get()); + countDownLatch1.await(); + assertEquals(1, tieredSpilloverCache.getOnHeapCache().count()); + assertEquals(1, onDiskCache.count()); + assertNotNull(onDiskCache.get(keyToBeEvicted)); + } + + class MockCacheRemovalListener<K, V> implements RemovalListener<K, V> { + final CounterMetric evictionsMetric = new CounterMetric(); + + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + evictionsMetric.inc(); + } + } + + public void testDiskTierPolicies() throws Exception { + // For policy function, allow if what it receives starts with "a" and string is even length + ArrayList<Predicate<String>> policies = new ArrayList<>(); + policies.add(new AllowFirstLetterA()); + policies.add(new AllowEvenLengths()); + + int keyValueSize = 50; + int onHeapCacheSize = 0; + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + keyValueSize, + 100, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * 50 + "b" + ) + .build(), + 0, + policies + ); + + Map<String, String> keyValuePairs = new HashMap<>(); + Map<String, Boolean> expectedOutputs = new HashMap<>(); + keyValuePairs.put("key1", "abcd"); + expectedOutputs.put("key1", true); + keyValuePairs.put("key2", "abcde"); + expectedOutputs.put("key2", false); + keyValuePairs.put("key3", "bbc"); + expectedOutputs.put("key3", false); + keyValuePairs.put("key4", "ab"); + expectedOutputs.put("key4", true); + keyValuePairs.put("key5", ""); + expectedOutputs.put("key5", false); + + LoadAwareCacheLoader<String, String> loader = new LoadAwareCacheLoader<String, String>() { + boolean isLoaded = false; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + return keyValuePairs.get(key); + } + }; + + for (String key : keyValuePairs.keySet()) { + Boolean expectedOutput = expectedOutputs.get(key); + String value = tieredSpilloverCache.computeIfAbsent(key, loader); + assertEquals(keyValuePairs.get(key), value); + String result = tieredSpilloverCache.get(key); + if (expectedOutput) { + // Should retrieve from disk tier if it was accepted + assertEquals(keyValuePairs.get(key), result); + } else { + // Should miss as heap tier size = 0 and the policy rejected it + assertNull(result); + } + } + } + + public void testTookTimePolicyFromFactory() throws Exception { + // Mock took time by passing this map to the policy info wrapper fn + // The policy inspects values, not keys, so this is a map from values -> took time + Map<String, Long> tookTimeMap = new HashMap<>(); + tookTimeMap.put("a", 10_000_000L); + tookTimeMap.put("b", 0L); + tookTimeMap.put("c", 99_999_999L); + tookTimeMap.put("d", null); + tookTimeMap.put("e", -1L); + tookTimeMap.put("f", 8_888_888L); + long timeValueThresholdNanos = 10_000_000L; + + Map<String, String> keyValueMap = Map.of("A", "a", "B", "b", "C", "c", "D", "d", "E", "e", "F", "f"); + + // Most of setup 
duplicated from testComputeIfAbsentWithFactoryBasedCacheCreation() + int onHeapCacheSize = randomIntBetween(tookTimeMap.size() + 1, tookTimeMap.size() + 30); + int diskCacheSize = tookTimeMap.size(); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + + // Set the desired settings needed to create a TieredSpilloverCache object with INDICES_REQUEST_CACHE cacheType. + Settings settings = Settings.builder() + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + new TimeValue(timeValueThresholdNanos / 1_000_000) + ) + .build(); + + ICache<String, String> tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder<String, String>().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setSettings(settings) + .setMaxSizeInBytes(onHeapCacheSize * keyValueSize) + .setCachedResultParser(new Function<String, CachedQueryResult.PolicyValues>() { + @Override + public CachedQueryResult.PolicyValues apply(String s) { + return new CachedQueryResult.PolicyValues(tookTimeMap.get(s)); + } + }) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockDiskCache.MockDiskCacheFactory.NAME, + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + ) + ); + + TieredSpilloverCache<String, String> tieredSpilloverCache = (TieredSpilloverCache<String, String>) tieredSpilloverICache; + + // First add all our values to the on heap cache + for (String key : tookTimeMap.keySet()) { + tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader(keyValueMap)); + } + assertEquals(tookTimeMap.size(), tieredSpilloverCache.count()); + + // Ensure all these keys get evicted from the on heap tier by adding > heap tier size worth of random keys + for (int i = 0; i < onHeapCacheSize; i++) { + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), getLoadAwareCacheLoader(keyValueMap)); + } + ICache<String, String> onHeapCache = tieredSpilloverCache.getOnHeapCache(); + for (String key : tookTimeMap.keySet()) { + assertNull(onHeapCache.get(key)); + } + + // Now the original keys should be in the disk tier if the policy allows them, or misses if not + for (String key : tookTimeMap.keySet()) { + String computedValue = tieredSpilloverCache.get(key); + String mapValue = keyValueMap.get(key); + Long tookTime = tookTimeMap.get(mapValue); + if (tookTime != null && tookTime > timeValueThresholdNanos) { + // expect a hit + assertNotNull(computedValue); + } else { + // expect a miss + assertNull(computedValue); + } + } + } + + public 
+    public void testMinimumThresholdSettingValue() throws Exception {
+        // Confirm we can't set TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD to below
+        // TimeValue.ZERO (for example, MINUS_ONE)
+        Setting<TimeValue> concreteSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD
+            .getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix());
+        TimeValue validDuration = new TimeValue(0, TimeUnit.MILLISECONDS);
+        Settings validSettings = Settings.builder().put(concreteSetting.getKey(), validDuration).build();
+
+        Settings belowThresholdSettings = Settings.builder().put(concreteSetting.getKey(), TimeValue.MINUS_ONE).build();
+
+        assertThrows(IllegalArgumentException.class, () -> concreteSetting.get(belowThresholdSettings));
+        assertEquals(validDuration, concreteSetting.get(validSettings));
+    }
+
+    private static class AllowFirstLetterA implements Predicate<String> {
+        @Override
+        public boolean test(String data) {
+            try {
+                return (data.charAt(0) == 'a');
+            } catch (StringIndexOutOfBoundsException e) {
+                return false;
+            }
+        }
+    }
+
+    private static class AllowEvenLengths implements Predicate<String> {
+        @Override
+        public boolean test(String data) {
+            return data.length() % 2 == 0;
+        }
+    }
+
+    private LoadAwareCacheLoader<String, String> getLoadAwareCacheLoader() {
+        return new LoadAwareCacheLoader<>() {
+            boolean isLoaded = false;
+
+            @Override
+            public String load(String key) {
+                isLoaded = true;
+                return UUID.randomUUID().toString();
+            }
+
+            @Override
+            public boolean isLoaded() {
+                return isLoaded;
+            }
+        };
+    }
+
+    private LoadAwareCacheLoader<String, String> getLoadAwareCacheLoader(Map<String, String> keyValueMap) {
+        return new LoadAwareCacheLoader<>() {
+            boolean isLoaded = false;
+
+            @Override
+            public String load(String key) {
+                isLoaded = true;
+                String mapValue = keyValueMap.get(key);
+                if (mapValue == null) {
+                    mapValue = UUID.randomUUID().toString();
+                }
+                return mapValue;
+            }
+
+            @Override
+            public boolean isLoaded() {
+                return isLoaded;
+            }
+        };
+    }
+
+    private TieredSpilloverCache<String, String> intializeTieredSpilloverCache(
+        int keyValueSize,
+        int diskCacheSize,
+        RemovalListener<String, String> removalListener,
+        Settings settings,
+        long diskDeliberateDelay
+    ) {
+        return intializeTieredSpilloverCache(keyValueSize, diskCacheSize, removalListener, settings, diskDeliberateDelay, null);
+    }
+
+    private TieredSpilloverCache<String, String> intializeTieredSpilloverCache(
+        int keyValueSize,
+        int diskCacheSize,
+        RemovalListener<String, String> removalListener,
+        Settings settings,
+        long diskDeliberateDelay,
+        List<Predicate<String>> policies
+    ) {
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setValueType(String.class)
+            .setWeigher((k, v) -> keyValueSize)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(settings)
+                    .build()
+            )
+            .build();
+        ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize);
+
+        TieredSpilloverCache.Builder<String, String> builder = new TieredSpilloverCache.Builder<String,
String>().setCacheType( + CacheType.INDICES_REQUEST_CACHE + ) + .setRemovalListener(removalListener) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setDiskCacheFactory(mockDiskCacheFactory) + .setCacheConfig(cacheConfig); + if (policies != null) { + builder.addPolicies(policies); + } + return builder.build(); + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java index c38b29502e282..6afd5c4ca75c1 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java @@ -11,12 +11,11 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.StandardValidator; import org.opensearch.geometry.utils.WellKnownText; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import java.util.Arrays; @@ -29,14 +28,14 @@ * This is the base class for all the Geo related integration tests. Use this class to add the features and settings * for the test cluster on which integration tests are running. */ -public abstract class GeoModulePluginIntegTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class GeoModulePluginIntegTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final double GEOHASH_TOLERANCE = 1E-5D; protected static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); - public GeoModulePluginIntegTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public GeoModulePluginIntegTestCase(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -47,11 +46,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Returns a collection of plugins that should be loaded on each node for doing the integration tests. As this * geo plugin is not getting packaged in a zip, we need to load it before the tests run. 
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java
index 7344903fd5220..9e7ce0d3c7980 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java
@@ -44,8 +44,8 @@ public class MissingValueIT extends GeoModulePluginIntegTestCase {
     private GeoPoint bottomRight;
     private GeoPoint topLeft;
 
-    public MissingValueIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public MissingValueIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @Override
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java
new file mode 100644
index 0000000000000..c968fb2f6c2da
--- /dev/null
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java
@@ -0,0 +1,647 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.common.hash.MessageDigests;
+import org.opensearch.common.network.InetAddresses;
+import org.opensearch.core.common.Strings;
+import org.opensearch.ingest.AbstractProcessor;
+import org.opensearch.ingest.ConfigurationUtils;
+import org.opensearch.ingest.IngestDocument;
+import org.opensearch.ingest.Processor;
+
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Locale;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException;
+
+/**
+ * Processor that generates a community ID flow hash for network flow tuples. The algorithm is defined in
+ * <a href="https://github.com/corelight/community-id-spec">Community ID Flow Hashing</a>.
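+ * <p>
+ * In outline (per {@code generateCommunityIDHash} below): the two-byte big-endian seed, the source IP bytes,
+ * the destination IP bytes, the one-byte protocol number, a zero padding byte, and the two-byte source and
+ * destination ports are fed into SHA-1; the digest is Base64-encoded and prefixed with the hash version,
+ * yielding values of the form {@code 1:<base64 digest>}.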
+ */
+public class CommunityIdProcessor extends AbstractProcessor {
+    public static final String TYPE = "community_id";
+    // the version of the community id flow hashing algorithm
+    private static final String COMMUNITY_ID_HASH_VERSION = "1";
+    // 0 byte for padding
+    private static final byte PADDING_BYTE = 0;
+    // the maximum code number for network protocol, ICMP message type and code as defined by IANA
+    private static final int IANA_COMMON_MAX_NUMBER = 255;
+    // the minimum code number for network protocol, ICMP message type and code as defined by IANA
+    private static final int IANA_COMMON_MIN_NUMBER = 0;
+    // the minimum seed for generating hash
+    private static final int MIN_SEED = 0;
+    // the maximum seed for generating hash
+    private static final int MAX_SEED = 65535;
+    // the minimum port number in transport layer
+    private static final int MIN_PORT = 0;
+    // the maximum port number in transport layer
+    private static final int MAX_PORT = 65535;
+    private static final String ICMP_MESSAGE_TYPE = "type";
+    private static final String ICMP_MESSAGE_CODE = "code";
+    private final String sourceIPField;
+    private final String sourcePortField;
+    private final String destinationIPField;
+    private final String destinationPortField;
+    private final String ianaProtocolNumberField;
+    private final String protocolField;
+    private final String icmpTypeField;
+    private final String icmpCodeField;
+    private final int seed;
+    private final String targetField;
+    private final boolean ignoreMissing;
+
+    CommunityIdProcessor(
+        String tag,
+        String description,
+        String sourceIPField,
+        String sourcePortField,
+        String destinationIPField,
+        String destinationPortField,
+        String ianaProtocolNumberField,
+        String protocolField,
+        String icmpTypeField,
+        String icmpCodeField,
+        int seed,
+        String targetField,
+        boolean ignoreMissing
+    ) {
+        super(tag, description);
+        this.sourceIPField = sourceIPField;
+        this.sourcePortField = sourcePortField;
+        this.destinationIPField = destinationIPField;
+        this.destinationPortField = destinationPortField;
+        this.ianaProtocolNumberField = ianaProtocolNumberField;
+        this.protocolField = protocolField;
+        this.icmpTypeField = icmpTypeField;
+        this.icmpCodeField = icmpCodeField;
+        this.seed = seed;
+        this.targetField = targetField;
+        this.ignoreMissing = ignoreMissing;
+    }
+
+    public String getSourceIPField() {
+        return sourceIPField;
+    }
+
+    public String getSourcePortField() {
+        return sourcePortField;
+    }
+
+    public String getDestinationIPField() {
+        return destinationIPField;
+    }
+
+    public String getDestinationPortField() {
+        return destinationPortField;
+    }
+
+    public String getIANAProtocolNumberField() {
+        return ianaProtocolNumberField;
+    }
+
+    public String getProtocolField() {
+        return protocolField;
+    }
+
+    public String getIcmpTypeField() {
+        return icmpTypeField;
+    }
+
+    public String getIcmpCodeField() {
+        return icmpCodeField;
+    }
+
+    public int getSeed() {
+        return seed;
+    }
+
+    public String getTargetField() {
+        return targetField;
+    }
+
+    public boolean isIgnoreMissing() {
+        return ignoreMissing;
+    }
+
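+    /**
+     * In outline: resolve the protocol, then the source/destination IPs, then the ports (for ICMP flows the
+     * message type and code stand in for the ports), order the tuple so the smaller ip:port pair comes first
+     * unless the flow is a one-way ICMP flow, then hash the tuple and store the digest in the target field.
+     */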
+    @Override
+    public IngestDocument execute(IngestDocument document) {
+        // resolve the protocol first
+        Protocol protocol = resolveProtocol(document);
+        // exit quietly if the protocol cannot be resolved and ignore_missing is true
+        if (protocol == null) {
+            return document;
+        }
+
+        // resolve the ip addresses next, exit quietly if either source ip or destination ip cannot be resolved
+        // and ignore_missing is true
+        byte[] sourceIPByteArray = resolveIP(document, sourceIPField);
+        if (sourceIPByteArray == null) {
+            return document;
+        }
+        byte[] destIPByteArray = resolveIP(document, destinationIPField);
+        if (destIPByteArray == null) {
+            return document;
+        }
+        // source ip and destination ip must have the same format, either ipv4 or ipv6
+        if (sourceIPByteArray.length != destIPByteArray.length) {
+            throw new IllegalArgumentException("source ip and destination ip must have same format");
+        }
+
+        // resolve source port and destination port for transport protocols,
+        // exit quietly if either source port or destination port is null or empty
+        Integer sourcePort = null;
+        Integer destinationPort = null;
+        if (protocol.isTransportProtocol()) {
+            sourcePort = resolvePort(document, sourcePortField);
+            if (sourcePort == null) {
+                return document;
+            }
+
+            destinationPort = resolvePort(document, destinationPortField);
+            if (destinationPort == null) {
+                return document;
+            }
+        }
+
+        // resolve ICMP message type and code, supporting both ipv4 and ipv6
+        // set source port to icmp type, and set dest port to icmp code, so that we can have a generic way to handle
+        // all protocols
+        boolean isOneway = true;
+        final boolean isICMPProtocol = Protocol.ICMP == protocol || Protocol.ICMP_V6 == protocol;
+        if (isICMPProtocol) {
+            Integer icmpType = resolveICMP(document, icmpTypeField, ICMP_MESSAGE_TYPE);
+            if (icmpType == null) {
+                return document;
+            } else {
+                sourcePort = icmpType;
+            }
+
+            // for the message types which don't have a code, fetch the equivalent code from the pre-defined mapper;
+            // such flows can be considered two-way
+            Byte equivalentCode = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode()
+                ? ICMPType.getEquivalentCode(icmpType.byteValue())
+                : ICMPv6Type.getEquivalentCode(icmpType.byteValue());
+            if (equivalentCode != null) {
+                isOneway = false;
+                // for IPv6-ICMP, the pre-defined code is a negative byte,
+                // we need to convert it to a positive integer for later comparison
+                destinationPort = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode()
+                    ? Integer.valueOf(equivalentCode)
+                    : Byte.toUnsignedInt(equivalentCode);
+            } else {
+                // get icmp code from the document if we cannot get an equivalent code from the pre-defined mapper
+                Integer icmpCode = resolveICMP(document, icmpCodeField, ICMP_MESSAGE_CODE);
+                if (icmpCode == null) {
+                    return document;
+                } else {
+                    destinationPort = icmpCode;
+                }
+            }
+        }
+
+        assert (sourcePort != null && destinationPort != null);
+        boolean isLess = compareIPAndPort(sourceIPByteArray, sourcePort, destIPByteArray, destinationPort);
+        // swap ip and port to remove directionality in the flow tuple, the smaller ip:port tuple comes first;
+        // but for ICMP and IPv6-ICMP, if it's a one-way flow, the flow tuple is considered to be ordered
+        if (!isLess && (!isICMPProtocol || !isOneway)) {
+            byte[] byteArray = sourceIPByteArray;
+            sourceIPByteArray = destIPByteArray;
+            destIPByteArray = byteArray;
+
+            int tempPort = sourcePort;
+            sourcePort = destinationPort;
+            destinationPort = tempPort;
+        }
+
+        // generate the flow hash
+        String digest = generateCommunityIDHash(
+            protocol.getProtocolCode(),
+            sourceIPByteArray,
+            destIPByteArray,
+            sourcePort,
+            destinationPort,
+            seed
+        );
+        document.setFieldValue(targetField, digest);
+        return document;
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+
+    /**
+     * Resolve the network protocol
+     * @param document the ingesting document
+     * @return the resolved protocol, or null if the protocol cannot be resolved and ignore_missing is true
+     * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid,
+     *         or if the field that is found at the provided path is not of the expected type.
+     */
+    private Protocol resolveProtocol(IngestDocument document) {
+        Protocol protocol = null;
+        Integer ianaProtocolNumber = null;
+        String protocolName = null;
+        if (!Strings.isNullOrEmpty(ianaProtocolNumberField)) {
+            ianaProtocolNumber = document.getFieldValue(ianaProtocolNumberField, Integer.class, true);
+        }
+        if (!Strings.isNullOrEmpty(protocolField)) {
+            protocolName = document.getFieldValue(protocolField, String.class, true);
+        }
+        // prefer the iana protocol number, and fall back to the protocol name if the number is not specified
+        if (ianaProtocolNumber != null) {
+            if (ianaProtocolNumber >= IANA_COMMON_MIN_NUMBER
+                && ianaProtocolNumber <= IANA_COMMON_MAX_NUMBER
+                && Protocol.protocolCodeMap.containsKey(ianaProtocolNumber.byteValue())) {
+                protocol = Protocol.protocolCodeMap.get(ianaProtocolNumber.byteValue());
+            } else {
+                throw new IllegalArgumentException("unsupported iana protocol number [" + ianaProtocolNumber + "]");
+            }
+        } else if (protocolName != null) {
+            Protocol protocolFromName = Protocol.fromProtocolName(protocolName);
+            if (protocolFromName != null) {
+                protocol = protocolFromName;
+            } else {
+                throw new IllegalArgumentException("unsupported protocol [" + protocolName + "]");
+            }
+        }
+
+        // return null if protocol cannot be resolved and ignore_missing is true
+        if (protocol == null) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException(
+                    "cannot resolve protocol by neither iana protocol number field ["
+                        + ianaProtocolNumberField
+                        + "] nor protocol name field ["
+                        + protocolField
+                        + "]"
+                );
+            }
+        }
+        return protocol;
+    }
+
+    /**
+     * Resolve an ip address
+     * @param document the ingesting document
+     * @param fieldName the ip field to be resolved
+     * @return the byte array of the resolved ip
+     * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid,
+     *         or if the field that is found at the provided path is not of the expected type.
+     */
+    private byte[] resolveIP(IngestDocument document, String fieldName) {
+        if (Strings.isNullOrEmpty(fieldName)) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException("both source ip field path and destination ip field path cannot be null nor empty");
+            }
+        }
+
+        String ipAddress = document.getFieldValue(fieldName, String.class, true);
+        if (Strings.isNullOrEmpty(ipAddress)) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException("ip address in the field [" + fieldName + "] is null or empty");
+            }
+        }
+
+        byte[] byteArray = InetAddresses.ipStringToBytes(ipAddress);
+        if (byteArray == null) {
+            throw new IllegalArgumentException(
+                "ip address [" + ipAddress + "] in the field [" + fieldName + "] is not a valid ipv4/ipv6 address"
+            );
+        } else {
+            return byteArray;
+        }
+    }
+
+    /**
+     * Resolve a port for transport protocols
+     * @param document the ingesting document
+     * @param fieldName the port field to be resolved
+     * @return the resolved port number, or null if the resolved port is null and ignoreMissing is true
+     * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid,
+     *         or if the field that is found at the provided path is not of the expected type.
+     */
+    private Integer resolvePort(IngestDocument document, String fieldName) {
+        Integer port;
+        if (Strings.isNullOrEmpty(fieldName)) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException("both source port and destination port field path cannot be null nor empty");
+            }
+        } else {
+            port = document.getFieldValue(fieldName, Integer.class, true);
+        }
+
+        if (port == null) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException(
+                    "both source port and destination port cannot be null, but port in the field path [" + fieldName + "] is null"
+                );
+            }
+        } else if (port < MIN_PORT || port > MAX_PORT) {
+            throw new IllegalArgumentException(
+                "both source port and destination port must be between 0 and 65535, but port in the field path ["
+                    + fieldName
+                    + "] is ["
+                    + port
+                    + "]"
+            );
+        }
+        return port;
+    }
+
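+    // Note: ICMP flows carry no real ports; resolveICMP below reads the message type and code, which execute()
+    // then substitutes for the source and destination port. For example, an ICMP echo request (type 8) is hashed
+    // with pseudo source port 8 and pseudo destination port 0, the equivalent code defined by ICMPType.ECHO.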
+    /**
+     * Resolve ICMP's message type and code field
+     * @param document the ingesting document
+     * @param fieldName name of the type or the code field
+     * @param fieldType type or code
+     * @return the resolved value of the specified field, or null if ignore_missing is true and the field doesn't exist or is null
+     * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid,
+     *         or if the field that is found at the provided path is not of the expected type.
+     */
+    private Integer resolveICMP(IngestDocument document, String fieldName, String fieldType) {
+        if (Strings.isNullOrEmpty(fieldName)) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException("icmp message " + fieldType + " field path cannot be null nor empty");
+            }
+        }
+        Integer fieldValue = document.getFieldValue(fieldName, Integer.class, true);
+        if (fieldValue == null) {
+            if (ignoreMissing) {
+                return null;
+            } else {
+                throw new IllegalArgumentException("icmp message " + fieldType + " cannot be null");
+            }
+        } else if (fieldValue < IANA_COMMON_MIN_NUMBER || fieldValue > IANA_COMMON_MAX_NUMBER) {
+            throw new IllegalArgumentException("invalid icmp message " + fieldType + " [" + fieldValue + "]");
+        } else {
+            return fieldValue;
+        }
+    }
+
+    /**
+     * Generate the community id flow hash for the flow tuple
+     * @param protocolCode byte of the protocol number
+     * @param sourceIPByteArray bytes of the source ip in the network flow tuple
+     * @param destIPByteArray bytes of the destination ip in the network flow tuple
+     * @param sourcePort source port in the network flow tuple
+     * @param destinationPort destination port in the network flow tuple
+     * @param seed seed for generating hash
+     * @return the generated hash value, using SHA-1
+     */
+    private String generateCommunityIDHash(
+        byte protocolCode,
+        byte[] sourceIPByteArray,
+        byte[] destIPByteArray,
+        Integer sourcePort,
+        Integer destinationPort,
+        int seed
+    ) {
+        MessageDigest messageDigest = MessageDigests.sha1();
+        messageDigest.update(intToTwoByteArray(seed));
+        messageDigest.update(sourceIPByteArray);
+        messageDigest.update(destIPByteArray);
+        messageDigest.update(protocolCode);
+        messageDigest.update(PADDING_BYTE);
+        messageDigest.update(intToTwoByteArray(sourcePort));
+        messageDigest.update(intToTwoByteArray(destinationPort));
+
+        return COMMUNITY_ID_HASH_VERSION + ":" + Base64.getEncoder().encodeToString(messageDigest.digest());
+    }
+
+    /**
+     * Convert an integer to a two-byte array (big-endian)
+     * @param val the integer which will be consumed to produce a two-byte array
+     * @return the two-byte array
+     */
+    private byte[] intToTwoByteArray(Integer val) {
+        byte[] byteArray = new byte[2];
+        byteArray[0] = Integer.valueOf(val >>> 8).byteValue();
+        byteArray[1] = val.byteValue();
+        return byteArray;
+    }
+
+    /**
+     * Compare the ip and port, return true if the flow tuple is ordered
+     * @param sourceIPByteArray bytes of the source ip in the network flow tuple
+     * @param destIPByteArray bytes of the destination ip in the network flow tuple
+     * @param sourcePort source port in the network flow tuple
+     * @param destinationPort destination port in the network flow tuple
+     * @return true if sourceIP is less than destinationIP, or if sourceIP equals destinationIP
+     *         and sourcePort is less than destinationPort
+     */
+    private boolean compareIPAndPort(byte[] sourceIPByteArray, int sourcePort, byte[] destIPByteArray, int destinationPort) {
+        int compareResult = compareByteArray(sourceIPByteArray, destIPByteArray);
+        return compareResult < 0 || compareResult == 0 && sourcePort < destinationPort;
+    }
+
+    /**
+     * Compare two byte arrays which have the same length
+     * @param byteArray1 the first byte array to compare
+     * @param byteArray2 the second byte array to compare
+     * @return 0 if every byte in the two arrays is the same, a value less than 0 if the first differing byte of the
+     *         first array is less than the corresponding byte of the second (compared as unsigned), and a value
+     *         greater than 0 if it is greater
+     */
+    private int compareByteArray(byte[] byteArray1, byte[] byteArray2) {
+        assert
(byteArray1.length == byteArray2.length); + int i = 0; + int j = 0; + while (i < byteArray1.length && j < byteArray2.length) { + int isLess = Byte.compareUnsigned(byteArray1[i], byteArray2[j]); + if (isLess == 0) { + i++; + j++; + } else { + return isLess; + } + } + return 0; + } + + /** + * Mapping ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPType { + ECHO_REPLY((byte) 0, (byte) 8), + ECHO((byte) 8, (byte) 0), + RTR_ADVERT((byte) 9, (byte) 10), + RTR_SOLICIT((byte) 10, (byte) 9), + TSTAMP((byte) 13, (byte) 14), + TSTAMP_REPLY((byte) 14, (byte) 13), + INFO((byte) 15, (byte) 16), + INFO_REPLY((byte) 16, (byte) 15), + MASK((byte) 17, (byte) 18), + MASK_REPLY((byte) 18, (byte) 17); + + private final byte type; + private final byte code; + + ICMPType(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of ICMP and derives equivalent message code + * @param type the message type of ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * Mapping IPv6-ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPv6Type { + ECHO_REQUEST((byte) 128, (byte) 129), + ECHO_REPLY((byte) 129, (byte) 128), + MLD_LISTENER_QUERY((byte) 130, (byte) 131), + MLD_LISTENER_REPORT((byte) 131, (byte) 130), + ND_ROUTER_SOLICIT((byte) 133, (byte) 134), + ND_ROUTER_ADVERT((byte) 134, (byte) 133), + ND_NEIGHBOR_SOLICIT((byte) 135, (byte) 136), + ND_NEIGHBOR_ADVERT((byte) 136, (byte) 135), + WRU_REQUEST((byte) 139, (byte) 140), + WRU_REPLY((byte) 140, (byte) 139), + HAAD_REQUEST((byte) 144, (byte) 145), + HAAD_REPLY((byte) 145, (byte) 144); + + private final byte type; + private final byte code; + + ICMPv6Type(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of IPv6-ICMP and derives equivalent message code + * @param type the message type of IPv6-ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * An enumeration of the supported network protocols + */ + enum Protocol { + ICMP((byte) 1, false), + TCP((byte) 6, true), + UDP((byte) 17, true), + ICMP_V6((byte) 58, false), + SCTP((byte) 132, true); + + private final byte protocolCode; + private final boolean isTransportProtocol; + + Protocol(int ianaNumber, boolean isTransportProtocol) { + this.protocolCode = Integer.valueOf(ianaNumber).byteValue(); + this.isTransportProtocol = isTransportProtocol; + } + + public static final Map<Byte, Protocol> protocolCodeMap = Arrays.stream(values()) + .collect(Collectors.toMap(Protocol::getProtocolCode, p -> p)); + + public static Protocol fromProtocolName(String protocolName) { + String name = protocolName.toUpperCase(Locale.ROOT); + if (name.equals("IPV6-ICMP")) { + return Protocol.ICMP_V6; + } + try { + return valueOf(name); + } catch (IllegalArgumentException e) { + return null; + } + } + + public byte getProtocolCode() { + return this.protocolCode; + } + + public boolean isTransportProtocol() { + return 
this.isTransportProtocol; + } + } + + public static class Factory implements Processor.Factory { + @Override + public CommunityIdProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + String sourceIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_ip_field"); + String sourcePortField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "source_port_field"); + String destinationIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "destination_ip_field"); + String destinationPortField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "destination_port_field" + ); + String ianaProtocolNumberField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "iana_protocol_number_field" + ); + String protocolField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "protocol_field"); + String icmpTypeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_type_field"); + String icmpCodeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_code_field"); + int seed = ConfigurationUtils.readIntProperty(TYPE, processorTag, config, "seed", 0); + if (seed < MIN_SEED || seed > MAX_SEED) { + throw newConfigurationException(TYPE, processorTag, "seed", "seed must be between 0 and 65535"); + } + + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", "community_id"); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + + return new CommunityIdProcessor( + processorTag, + description, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index 7c1b4841122b0..0f8b248fd5af8 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -107,6 +107,8 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory()); processors.put(CsvProcessor.TYPE, new CsvProcessor.Factory()); processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService)); + processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory()); + processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java new file mode 100644 index 0000000000000..da87f5201db72 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made 
to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.ValidationException;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.core.common.Strings;
+import org.opensearch.ingest.AbstractProcessor;
+import org.opensearch.ingest.ConfigurationUtils;
+import org.opensearch.ingest.IngestDocument;
+import org.opensearch.ingest.Processor;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException;
+
+/**
+ * Processor that removes existing fields matching the given field patterns, or, when exclude patterns are given,
+ * removes all fields not matching them.
+ */
+public final class RemoveByPatternProcessor extends AbstractProcessor {
+
+    public static final String TYPE = "remove_by_pattern";
+    private final List<String> fieldPatterns;
+    private final List<String> excludeFieldPatterns;
+
+    RemoveByPatternProcessor(
+        String tag,
+        String description,
+        @Nullable List<String> fieldPatterns,
+        @Nullable List<String> excludeFieldPatterns
+    ) {
+        super(tag, description);
+        if (fieldPatterns != null && excludeFieldPatterns != null || fieldPatterns == null && excludeFieldPatterns == null) {
+            throw new IllegalArgumentException("either fieldPatterns or excludeFieldPatterns must be set");
+        }
+        if (fieldPatterns == null) {
+            this.fieldPatterns = null;
+            this.excludeFieldPatterns = new ArrayList<>(excludeFieldPatterns);
+        } else {
+            this.fieldPatterns = new ArrayList<>(fieldPatterns);
+            this.excludeFieldPatterns = null;
+        }
+    }
+
+    public List<String> getFieldPatterns() {
+        return fieldPatterns;
+    }
+
+    public List<String> getExcludeFieldPatterns() {
+        return excludeFieldPatterns;
+    }
+
+    @Override
+    public IngestDocument execute(IngestDocument document) {
+        Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet());
+        Set<String> metadataFields = document.getMetadata()
+            .keySet()
+            .stream()
+            .map(IngestDocument.Metadata::getFieldName)
+            .collect(Collectors.toSet());
+
+        if (fieldPatterns != null && !fieldPatterns.isEmpty()) {
+            existingFields.forEach(field -> {
+                // ignore metadata fields such as _index, _id, etc.
+                if (!metadataFields.contains(field)) {
+                    final boolean matched = fieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field));
+                    if (matched) {
+                        document.removeField(field);
+                    }
+                }
+            });
+        }
+
+        if (excludeFieldPatterns != null && !excludeFieldPatterns.isEmpty()) {
+            existingFields.forEach(field -> {
+                // ignore metadata fields such as _index, _id, etc.
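+                // note the inverted logic for exclude_field_pattern: a non-metadata field is removed only when it
+                // matches none of the patterns, e.g. exclude_field_pattern ["foo*"] keeps "foo1" and removes "bar"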
+ if (!metadataFields.contains(field)) { + final boolean matched = excludeFieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field)); + if (!matched) { + document.removeField(field); + } + } + }); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + public Factory() {} + + @Override + public RemoveByPatternProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + final List<String> fieldPatterns = new ArrayList<>(); + final List<String> excludeFieldPatterns = new ArrayList<>(); + final Object fieldPattern = ConfigurationUtils.readOptionalObject(config, "field_pattern"); + final Object excludeFieldPattern = ConfigurationUtils.readOptionalObject(config, "exclude_field_pattern"); + + if (fieldPattern == null && excludeFieldPattern == null || fieldPattern != null && excludeFieldPattern != null) { + throw newConfigurationException( + TYPE, + processorTag, + "field_pattern", + "either field_pattern or exclude_field_pattern must be set" + ); + } + + if (fieldPattern != null) { + if (fieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> fieldPatternList = (List<String>) fieldPattern; + fieldPatterns.addAll(fieldPatternList); + } else { + fieldPatterns.add((String) fieldPattern); + } + validateFieldPatterns(processorTag, fieldPatterns, "field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, fieldPatterns, null); + } else { + if (excludeFieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> excludeFieldPatternList = (List<String>) excludeFieldPattern; + excludeFieldPatterns.addAll(excludeFieldPatternList); + } else { + excludeFieldPatterns.add((String) excludeFieldPattern); + } + validateFieldPatterns(processorTag, excludeFieldPatterns, "exclude_field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, null, excludeFieldPatterns); + } + } + + private void validateFieldPatterns(String processorTag, List<String> patterns, String patternKey) { + List<String> validationErrors = new ArrayList<>(); + for (String fieldPattern : patterns) { + if (fieldPattern.contains("#")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a '#'"); + } + if (fieldPattern.contains(":")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a ':'"); + } + if (fieldPattern.startsWith("_")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not start with '_'"); + } + if (Strings.validFileNameExcludingAstrix(fieldPattern) == false) { + validationErrors.add( + patternKey + " [" + fieldPattern + "] must not contain the following characters " + Strings.INVALID_FILENAME_CHARS + ); + } + } + + if (validationErrors.size() > 0) { + ValidationException validationException = new ValidationException(); + validationException.addValidationErrors(validationErrors); + throw newConfigurationException(TYPE, processorTag, patternKey, validationException.getMessage()); + } + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index d01dce02fca31..e6d151aec9be1 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -72,7 +72,7 @@ public final class RemoveProcessor extends AbstractProcessor { ) { super(tag, description); if (fields == null && excludeFields == null || fields != null && excludeFields != null) { - throw new IllegalArgumentException("ether fields and excludeFields must be set"); + throw new IllegalArgumentException("either fields or excludeFields must be set"); } if (fields != null) { this.fields = new ArrayList<>(fields); @@ -188,7 +188,7 @@ public RemoveProcessor create( final Object excludeField = ConfigurationUtils.readOptionalObject(config, "exclude_field"); if (field == null && excludeField == null || field != null && excludeField != null) { - throw newConfigurationException(TYPE, processorTag, "field", "ether field or exclude_field must be set"); + throw newConfigurationException(TYPE, processorTag, "field", "either field or exclude_field must be set"); } boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java new file mode 100644 index 0000000000000..5edb44b8c64f2 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CommunityIdProcessorFactoryTests extends OpenSearchTestCase { + private CommunityIdProcessor.Factory factory; + + @Before + public void init() { + factory = new CommunityIdProcessor.Factory(); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + int seed = randomIntBetween(0, 65535); + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + config.put("source_port_field", "source_port"); + config.put("destination_ip_field", "destination_ip"); + config.put("destination_port_field", "destination_port"); + config.put("iana_protocol_number_field", "iana_protocol_number"); + config.put("protocol_field", "protocol"); + config.put("icmp_type_field", "icmp_type"); + config.put("icmp_code_field", "icmp_code"); + config.put("seed", seed); + config.put("target_field", "community_id_hash"); + config.put("ignore_missing", ignoreMissing); + String processorTag = randomAlphaOfLength(10); + CommunityIdProcessor communityIDProcessor = factory.create(null, processorTag, null, config); + assertThat(communityIDProcessor.getTag(), equalTo(processorTag)); + assertThat(communityIDProcessor.getSourceIPField(), equalTo("source_ip")); + assertThat(communityIDProcessor.getSourcePortField(), equalTo("source_port")); + assertThat(communityIDProcessor.getDestinationIPField(), equalTo("destination_ip")); + assertThat(communityIDProcessor.getDestinationPortField(), equalTo("destination_port")); + 
assertThat(communityIDProcessor.getIANAProtocolNumberField(), equalTo("iana_protocol_number")); + assertThat(communityIDProcessor.getProtocolField(), equalTo("protocol")); + assertThat(communityIDProcessor.getIcmpTypeField(), equalTo("icmp_type")); + assertThat(communityIDProcessor.getIcmpCodeField(), equalTo("icmp_code")); + assertThat(communityIDProcessor.getSeed(), equalTo(seed)); + assertThat(communityIDProcessor.getTargetField(), equalTo("community_id_hash")); + assertThat(communityIDProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + } + + public void testCreateWithSourceIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + + config.put("source_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + } + + public void testCreateWithDestinationIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + } + + public void testInvalidSeed() throws Exception { + Map<String, Object> config = new HashMap<>(); + int seed; + if (randomBoolean()) { + seed = -1; + } else { + seed = 65536; + } + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", "destination_ip"); + config.put("seed", seed); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchException e) { + assertThat(e.getMessage(), equalTo("[seed] seed must be between 0 and 65535")); + } + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java new file mode 100644 index 0000000000000..2bda9db80dbcc --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java @@ -0,0 +1,910 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CommunityIdProcessorTests extends OpenSearchTestCase { + + public void testResolveProtocol() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "cannot resolve protocol by neither iana protocol number field [iana_protocol_number] nor protocol name field [protocol]", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + String protocol = randomAlphaOfLength(10); + source.put("protocol", protocol); + IngestDocument ingestDocumentWithProtocol = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithProtocol = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported protocol [" + protocol + "]", + IllegalArgumentException.class, + () -> processorWithProtocol.execute(ingestDocumentWithProtocol) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + int ianaProtocolNumber = randomIntBetween(1000, 10000); + source.put("iana_protocol_number", ianaProtocolNumber); + IngestDocument ingestDocumentWithProtocolNumber = RandomDocumentPicks.randomIngestDocument(random(), source); + + Processor processorWithProtocolNumber = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported iana protocol number [" + ianaProtocolNumber + "]", + IllegalArgumentException.class, + () -> processorWithProtocolNumber.execute(ingestDocumentWithProtocolNumber) + ); + } + + public void testResolveIPAndPort() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", ""); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String 
targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [source_ip] is null or empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourceIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourceIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + assertThrows( + "ip address in the field [source_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidSourceIP.execute(ingestDocumentWithInvalidSourceIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", ""); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP); + assertThat(ingestDocumentWithEmptyDestIP.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [destination_ip] is null or empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "ip address in the field [destination_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidDestIP.execute(ingestDocumentWithInvalidDestIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument normalIngestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourceIPFieldPath = createCommunityIdProcessor( + "", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptySourceIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourceIPFieldPath.execute(normalIngestDocument) + ); + } + ignore_missing = randomBoolean(); + Processor processorWithEmptyDestIPFieldPath = createCommunityIdProcessor( + "source_ip", + "source_port", + "", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIPFieldPath.execute(normalIngestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", null); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptySourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort); + assertThat(ingestDocumentWithEmptySourcePort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 65536); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port must be between 0 and 65535, but port in the field path [source_port] is [65536]", + IllegalArgumentException.class, + () -> processorWithInvalidSourcePort.execute(ingestDocumentWithInvalidSourcePort) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", null); + 
source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort); + assertThat(ingestDocumentWithEmptyDestPort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is null", + IllegalArgumentException.class, + () -> processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", -1); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is [-1]", + IllegalArgumentException.class, + () -> processorWithInvalidDestPort.execute(ingestDocumentWithInvalidDestPort) + ); + } + + public void testResolveICMPTypeAndCode() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + int protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + String targetFieldName = randomAlphaOfLength(100); + boolean ignoreMissing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + source.put("icmp_type", null); + IngestDocument ingestDocumentWithNullType = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullType.execute(ingestDocumentWithNullType); + 
assertThat(ingestDocumentWithNullType.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullType.execute(ingestDocumentWithNullType) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + int icmpType; + if (randomBoolean()) { + icmpType = randomIntBetween(256, 1000); + } else { + icmpType = randomIntBetween(-100, -1); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithInvalidICMPType = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidICMPType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + false + ); + assertThrows( + "invalid icmp message type [" + icmpType + "]", + IllegalArgumentException.class, + () -> processorWithInvalidICMPType.execute(ingestDocumentWithInvalidICMPType) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithNoCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNoCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNoCode.execute(ingestDocumentWithNoCode); + assertThat(ingestDocumentWithNoCode.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message code field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNoCode.execute(ingestDocumentWithNoCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + source.put("icmp_code", null); + IngestDocument ingestDocumentWithNullCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullCode.execute(ingestDocumentWithNullCode); + assertThat(ingestDocumentWithNullCode.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message code cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullCode.execute(ingestDocumentWithNullCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", 
"2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + int icmpCode; + if (randomBoolean()) { + icmpCode = randomIntBetween(256, 1000); + } else { + icmpCode = randomIntBetween(-100, -1); + } + source.put("icmp_code", icmpCode); + IngestDocument ingestDocumentWithInvalidCode = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + null, + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "invalid icmp message code [" + icmpCode + "]", + IllegalArgumentException.class, + () -> processorWithInvalidCode.execute(ingestDocumentWithInvalidCode) + ); + } + + public void testTransportProtocols() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + boolean isProtocolNameSpecified = randomBoolean(); + if (isProtocolNameSpecified) { + source.put("protocol", randomFrom("tcp", "udp", "sctp")); + } else { + source.put("iana_number", randomFrom(6, 17, 132)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor; + if (isProtocolNameSpecified) { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + } else { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + } + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + String communityIDHash = ingestDocument.getFieldValue(targetFieldName, String.class); + assertThat(communityIDHash.startsWith("1:"), equalTo(true)); + } + + public void testICMP() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + boolean isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + source.put("type", randomFrom(0, 8, 9, 10, 13, 15, 17, 18)); + } else { + source.put("protocol", "ipv6-icmp"); + source.put("type", randomFrom(128, 129, 130, 131, 133, 134, 135, 136, 139, 140, 144, 145)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + 
source.put("destination_ip", "2.2.2.2"); + isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + // see https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml#icmp-parameters-codes-5 + source.put("type", randomIntBetween(3, 6)); + source.put("code", 0); + } else { + source.put("protocol", "ipv6-icmp"); + // see https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xhtml#icmpv6-parameters-codes-23 + source.put("type", randomIntBetween(146, 161)); + source.put("code", 0); + } + + IngestDocument ingestDocumentWithOnewayFlow = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithOnewayFlow = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + "code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processorWithOnewayFlow.execute(ingestDocumentWithOnewayFlow); + assertThat(ingestDocumentWithOnewayFlow.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocumentWithOnewayFlow.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + } + + // test that the hash result is consistent with the known value + public void testHashResult() throws Exception { + int index = randomIntBetween(0, CommunityIdHashInstance.values().length - 1); + CommunityIdHashInstance instance = CommunityIdHashInstance.values()[index]; + final boolean isTransportProtocol = instance.name().equals("TCP") + || instance.name().equals("UDP") + || instance.name().equals("SCTP"); + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", instance.getSourceIp()); + source.put("destination_ip", instance.getDestIP()); + if (isTransportProtocol) { + source.put("source_port", instance.getSourcePort()); + source.put("destination_port", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + ignore_missing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + + // test the flow tuple in reversed direction, the hash result should be the same value + source = new HashMap<>(); + source.put("source_ip", instance.getDestIP()); + source.put("destination_ip", instance.getSourceIp()); + source.put("source_port", instance.getDestPort()); + source.put("destination_port", instance.getSourcePort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocumentWithReversedDirection = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithReversedDirection = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + randomBoolean() + ); + + processorWithReversedDirection.execute(ingestDocumentWithReversedDirection); + assertThat(ingestDocumentWithReversedDirection.hasField(targetFieldName), 
equalTo(true)); + assertThat(ingestDocumentWithReversedDirection.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } else { + source.put("type", instance.getSourcePort()); + source.put("code", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_number", + null, + "type", + "code", + 0, + targetFieldName, + ignore_missing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } + } + + private enum CommunityIdHashInstance { + TCP("66.35.250.204", "128.232.110.120", 6, 80, 34855, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="), + UDP("8.8.8.8", "192.168.1.52", 17, 53, 54585, "1:d/FP5EW3wiY1vCndhwleRRKHowQ="), + SCTP("192.168.170.8", "192.168.170.56", 132, 7, 7, "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs="), + ICMP("192.168.0.89", "192.168.0.1", 1, 8, 0, "1:X0snYXpgwiv9TZtqg64sgzUn6Dk="), + ICMP_V6("fe80::260:97ff:fe07:69ea", "ff02::1", 58, 134, 0, "1:pkvHqCL88/tg1k4cPigmZXUtL00="); + + private final String sourceIp; + private final String destIP; + private final int protocolNumber; + private final int sourcePort; + private final int destPort; + private final String hash; + + CommunityIdHashInstance(String sourceIp, String destIP, int protocolNumber, int sourcePort, int destPort, String hash) { + this.sourceIp = sourceIp; + this.destIP = destIP; + this.protocolNumber = protocolNumber; + this.sourcePort = sourcePort; + this.destPort = destPort; + this.hash = hash; + } + + private String getSourceIp() { + return this.sourceIp; + } + + private String getDestIP() { + return this.destIP; + } + + private int getProtocolNumber() { + return this.protocolNumber; + } + + private int getSourcePort() { + return this.sourcePort; + } + + private int getDestPort() { + return this.destPort; + } + + private String getHash() { + return this.hash; + } + } + + private static Processor createCommunityIdProcessor( + String sourceIPField, + String sourcePortField, + String destinationIPField, + String destinationPortField, + String ianaProtocolNumberField, + String protocolField, + String icmpTypeField, + String icmpCodeField, + int seed, + String targetField, + boolean ignoreMissing + ) { + return new CommunityIdProcessor( + randomAlphaOfLength(10), + null, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java index f271bdd342d0b..3259ba85ef340 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -14,6 +14,9 @@ import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; +import java.util.List; +import java.util.Map; + import static org.hamcrest.Matchers.equalTo; public class CopyProcessorTests 
extends OpenSearchTestCase { @@ -26,8 +29,7 @@ public void testCopyExistingField() throws Exception { processor.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); - assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); - assertThat(ingestDocument.getFieldValue(sourceFieldName, Object.class), equalTo(sourceValue)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); Processor processorWithEmptyTarget = createCopyProcessor(sourceFieldName, "", false, false, false); assertThrows( @@ -75,7 +77,7 @@ public void testCopyWithRemoveSource() throws Exception { Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); - assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); assertThat(ingestDocument.hasField(sourceFieldName), equalTo(false)); } @@ -97,12 +99,30 @@ public void testCopyToExistingField() throws Exception { Processor processorWithTargetNullValue = createCopyProcessor(sourceFieldName, targetFieldWithNullValue, false, false, false); processorWithTargetNullValue.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldWithNullValue), equalTo(true)); - assertThat(ingestDocument.getFieldValue(targetFieldWithNullValue, Object.class), equalTo(sourceValue)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldWithNullValue, Object.class), sourceValue); Processor processorWithOverrideTargetIsTrue = createCopyProcessor(sourceFieldName, targetFieldName, false, false, true); processorWithOverrideTargetIsTrue.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); - assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + } + + @SuppressWarnings("unchecked") + private static void assertDeepCopiedObjectEquals(Object expected, Object actual) { + if (expected instanceof Map) { + Map<String, Object> expectedMap = (Map<String, Object>) expected; + Map<String, Object> actualMap = (Map<String, Object>) actual; + assertEquals(expectedMap.size(), actualMap.size()); + for (Map.Entry<String, Object> expectedEntry : expectedMap.entrySet()) { + assertDeepCopiedObjectEquals(expectedEntry.getValue(), actualMap.get(expectedEntry.getKey())); + } + } else if (expected instanceof List) { + assertArrayEquals(((List<?>) expected).toArray(), ((List<?>) actual).toArray()); + } else if (expected instanceof byte[]) { + assertArrayEquals((byte[]) expected, (byte[]) actual); + } else { + assertEquals(expected, actual); + } } private static Processor createCopyProcessor( diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java new file mode 100644 index 0000000000000..09ba97ebb4595 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java @@ -0,0 +1,114 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class RemoveByPatternProcessorFactoryTests extends OpenSearchTestCase { + + private RemoveByPatternProcessor.Factory factory; + + @Before + public void init() { + factory = new RemoveByPatternProcessor.Factory(); + } + + public void testCreateFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + config3.put("field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + equalTo( + "[field_pattern] Validation Failed: " + + "1: field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "2: field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: field_pattern [#] must not contain a '#';" + + "4: field_pattern [:] must not contain a ':';" + + "5: field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreateExcludeFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("exclude_field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("exclude_field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + 
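// same invalid patterns as the field_pattern test above; every violation should surface in one aggregated validation exception +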
config3.put("exclude_field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + equalTo( + "[exclude_field_pattern] Validation Failed: " + + "1: exclude_field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "2: exclude_field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: exclude_field_pattern [#] must not contain a '#';" + + "4: exclude_field_pattern [:] must not contain a ':';" + + "5: exclude_field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreatePatternsFailed() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", List.of("foo*")); + config.put("exclude_field_pattern", List.of("bar*")); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", null); + config2.put("exclude_field_pattern", null); + + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java new file mode 100644 index 0000000000000..82ff93de1f44e --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RemoveByPatternProcessorTests extends OpenSearchTestCase { + + public void testRemoveWithFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("bar_1", "value"); + ingestDocument.setFieldValue("bar_2", "value"); + List<String> fieldPatterns = new ArrayList<>(); + fieldPatterns.add("foo*"); + fieldPatterns.add("_index*"); + fieldPatterns.add("_id*"); + fieldPatterns.add("_version*"); + Processor processor = new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, null); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("bar_1"), equalTo(true)); + assertThat(ingestDocument.hasField("bar_2"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testRemoveWithExcludeFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List<String> excludeFieldPatterns = new ArrayList<>(); + excludeFieldPatterns.add("foo_3*"); + Processor processorWithExcludeFieldsAndPatterns = new RemoveByPatternProcessor( + randomAlphaOfLength(10), + null, + null, + excludeFieldPatterns + ); + processorWithExcludeFieldsAndPatterns.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testCreateRemoveByPatternProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "either fieldPatterns or excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, null, null) + ); + + final List<String> fieldPatterns; + if (randomBoolean()) { + fieldPatterns = new ArrayList<>(); + } else { + fieldPatterns = List.of("foo_1*"); + } + + final List<String> excludeFieldPatterns; + if (randomBoolean()) { + excludeFieldPatterns = new ArrayList<>(); + } else { +
excludeFieldPatterns = List.of("foo_2*"); + } + + assertThrows( + "either fieldPatterns or excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, excludeFieldPatterns) + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java index 179aef2feac0c..6332eeafc387c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java @@ -97,13 +97,13 @@ public void testCreateWithExcludeField() throws Exception { OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config) ); - assertThat(exception.getMessage(), equalTo("[field] ether field or exclude_field must be set")); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); Map<String, Object> config2 = new HashMap<>(); config2.put("field", "field1"); config2.put("exclude_field", "field2"); exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); - assertThat(exception.getMessage(), equalTo("[field] ether field or exclude_field must be set")); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); Map<String, Object> config6 = new HashMap<>(); config6.put("exclude_field", "exclude_field"); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index 78a3d36124d45..7fc1d3f2f0a3c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -203,7 +203,7 @@ public void testRemoveMetadataField() throws Exception { public void testCreateRemoveProcessorWithBothFieldsAndExcludeFields() throws Exception { assertThrows( - "ether fields and excludeFields must be set", + "either fields or excludeFields must be set", IllegalArgumentException.class, () -> new RemoveProcessor(randomAlphaOfLength(10), null, null, null, false) ); @@ -223,7 +223,7 @@ public void testCreateRemoveProcessorWithBothFieldsAndExcludeFields() throws Exc } assertThrows( - "ether fields and excludeFields must be set", + "either fields or excludeFields must be set", IllegalArgumentException.class, () -> new RemoveProcessor(randomAlphaOfLength(10), null, fields, excludeFields, false) ); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml index 0719082c887f2..2a816f0386667 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml @@ -53,3 +53,36 @@ nodes.info: {} - contains: { nodes.$cluster_manager.ingest.processors: { type: copy } } + +--- +"Remove_by_pattern processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "remove_by_pattern processor was introduced in 2.12.0 and contains is a newly added assertion" + - do: + cluster.state: {} +
+ # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: remove_by_pattern } } + +--- +"Community_id processor exists": + - skip: + version: " - 2.12.99" + features: contains + reason: "community_id processor was introduced in 2.13.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + - contains: { nodes.$cluster_manager.ingest.processors: { type: community_id } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml new file mode 100644 index 0000000000000..397eb8f7b6033 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml @@ -0,0 +1,146 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test creating remove_by_pattern processor failed": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : "foo*", + "exclude_field_pattern" : "bar*" + } + } + ] + } + + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + } + } + ] + } + +--- +"Test remove_by_pattern processor with field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : ["foo*", "*a*b"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: {zoo: "bar" }} + +--- +"Test remove_by_pattern processor with exclude_field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "exclude_field_pattern": ["foo*", "a*b*"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { foo1: "bar", foo2: "bar", ab: "bar", aabb: "bar"}} + + +--- +"Test cannot remove metadata fields by remove_by_pattern processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /field\_pattern \[\_id\] must not start with \'\_\'\;/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern": "_id" + } + } + ] + } diff --git 
a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml new file mode 100644 index 0000000000000..6de5371bb49f7 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml @@ -0,0 +1,370 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test create community_id processor": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + catch: /\[source\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "destination_ip_field" : "dest" + } + } + ] + } + - do: + catch: /\[destination\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "src" + } + } + ] + } + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "iana_protocol_number_field" : "iana_number", + "protocol_field" : "protocol", + "icmp_type_field" : "icmp", + "icmp_code_field" : "code", + "seed" : 0, + "target_field" : "community_id", + "ignore_missing" : false + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test community_id processor with ignore_missing": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /ip address in the field \[source\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "1.1.1.1", + protocol: "tcp" + } + + - do: + catch: /ip address in the field \[dest\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "1.1.1.1", protocol: "tcp" } } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "2.2.2.2", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { dest: "2.2.2.2", protocol: "tcp" } } + +--- +"Test community_id processor for tcp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test +
id: 1 + pipeline: "1" + body: { + source: "66.35.250.204", + dest: "128.232.110.120", + protocol: "tcp", + srcPort: 80, + destPort: 34855 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:LQU9qZlK+B5F3KDmev6m5PMibrg=" } + +--- +"Test community_id processor for udp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "8.8.8.8", + dest: "192.168.1.52", + protocol: "udp", + srcPort: 53, + destPort: 54585 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:d/FP5EW3wiY1vCndhwleRRKHowQ=" } + +--- +"Test community_id processor for sctp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.170.8", + dest: "192.168.170.56", + protocol: "sctp", + srcPort: 7, + destPort: 7 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs=" } + +--- +"Test community_id processor for icmp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.0.89", + dest: "192.168.0.1", + protocol: "icmp", + type: 8, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:X0snYXpgwiv9TZtqg64sgzUn6Dk=" } + +--- +"Test community_id processor for icmp-v6": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "fe80::260:97ff:fe07:69ea", + dest: "ff02::1", + protocol: "ipv6-icmp", + type: 134, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:pkvHqCL88/tg1k4cPigmZXUtL00=" } diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 
b/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java index c7cf27b2e6493..b27c0f9fe0b31 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java @@ -218,6 +218,7 @@ Set<Property> getProperties() { return properties; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { SpecialPermission.check(); CityResponse response = AccessController.doPrivileged( @@ -305,6 +306,7 @@ private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { SpecialPermission.check(); CountryResponse response = AccessController.doPrivileged( @@ -351,6 +353,7 @@ private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveAsnGeoData(InetAddress ipAddress) { SpecialPermission.check(); AsnResponse response = AccessController.doPrivileged( diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..82a17e2b79290 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +00759eaff8f62b38ba66a05f26ab784c268908d3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 
b/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 deleted file mode 100644 index 402cc36ba3d68..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1782a69d0e83af9cc3c65db0dcd2e7e7c1e5f90e \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 8ca28a905f216..8e15488900e5f 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; @@ -57,7 +56,7 @@ import org.opensearch.search.aggregations.pipeline.SimpleValue; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.util.Arrays; @@ -80,10 +79,10 @@ import static org.hamcrest.Matchers.notNullValue; // TODO: please convert to unit tests! -public class MoreExpressionIT extends ParameterizedOpenSearchIntegTestCase { +public class MoreExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MoreExpressionIT(Settings dynamicSettings) { - super(dynamicSettings); + public MoreExpressionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -94,11 +93,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(ExpressionModulePlugin.class); diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index b1cb5356a4405..7465fa1e5ddbe 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -35,7 +35,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; @@ -43,7 +42,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -54,10 +53,10 @@ import static org.hamcrest.Matchers.containsString; //TODO: please convert to unit tests! -public class StoredExpressionIT extends ParameterizedOpenSearchIntegTestCase { +public class StoredExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public StoredExpressionIT(Settings dynamicSettings) { - super(dynamicSettings); + public StoredExpressionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 5629b3b4a6972..0520177b72b62 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -170,6 +170,7 @@ public String getType() { return NAME; } + @SuppressWarnings("removal") @Override public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) { // classloader created here diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java index 143ff4f5c51bd..d7be890014add 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new 
SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private FieldScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java index 498c0542e9c3e..94a422503d6bd 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private NumberSortScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java index 499f94afcb6af..a1d6df80715be 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private TermsSetQueryScript.LeafFactory compile(String expression) { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index e480fbbd22ad2..f7abc220e75d8 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -37,11 +37,10 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptType; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -58,10 +57,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.Is.is; -public class MultiSearchTemplateIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiSearchTemplateIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiSearchTemplateIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiSearchTemplateIT(Settings staticSettings) { + super(staticSettings); } 
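+ // With the switch to ParameterizedStaticSettingsOpenSearchIntegTestCase, the parameters below are applied as static node settings, replacing the removed featureFlagSettings() override.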
@ParametersFactory @@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(MustacheModulePlugin.class); diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index ec84475b70bb6..842353fdba336 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -128,6 +128,7 @@ private class MustacheExecutableScript extends TemplateScript { this.params = params; } + @SuppressWarnings("removal") @Override public String execute() { final StringWriter writer = new StringWriter(); diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java index 632fee9187eba..f18a7fb3ba1a9 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java @@ -513,6 +513,7 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, Map<String, All } } + @SuppressWarnings("removal") ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) resource::getClassLoader); return new Allowlist(loader, allowlistClasses, allowlistStatics, allowlistClassBindings, Collections.emptyList()); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java index d0af4651d2d3b..2bf70882a501b 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java @@ -501,6 +501,7 @@ private static void endLambdaClass(ClassWriter cw) { * Defines the {@link Class} for the lambda class using the same {@link Compiler.Loader} * that originally defined the class for the Painless script. 
*/ + @SuppressWarnings("removal") private static Class<?> createLambdaClass(Compiler.Loader loader, ClassWriter cw, Type lambdaClassType) { byte[] classBytes = cw.toByteArray(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java index e9edfb73c740c..257687bfb98c5 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java @@ -67,6 +67,7 @@ /** * Implementation of a ScriptEngine for the Painless language. */ +@SuppressWarnings("removal") public final class PainlessScriptEngine implements ScriptEngine { /** diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java index 260a2fc0c062c..6e3448e5eea77 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java @@ -45,10 +45,10 @@ import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue", "this-escape" }) abstract class PainlessLexer extends Lexer { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java index 40e76194f50b2..7ad5d113637c8 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java @@ -56,7 +56,7 @@ @SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) class PainlessParser extends Parser { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -337,7 +337,7 @@ public Vocabulary getVocabulary() { @Override public String getGrammarFileName() { - return "java-escape"; + return "PainlessParser.g4"; } @Override @@ -425,8 +425,8 @@ public final SourceContext source() throws RecognitionException { setState(87); _errHandler.sync(this); _la = _input.LA(1); - while (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { { setState(84); @@ -571,7 +571,7 @@ public final ParametersContext parameters() throws RecognitionException { setState(109); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(98); decltype(); @@ -1088,8 +1088,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(140); 
_errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(139); initializer(); @@ -1101,8 +1101,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(144); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(143); expression(); @@ -1114,8 +1114,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(148); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(147); afterthought(); @@ -1470,8 +1470,8 @@ public final DstatementContext dstatement() throws RecognitionException { setState(193); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(192); expression(); @@ -1661,8 +1661,8 @@ public final BlockContext block() throws RecognitionException { setState(212); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(211); dstatement(); @@ -2491,7 +2491,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); setState(269); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2545,7 +2545,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); setState(278); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2563,7 +2563,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); setState(281); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 4123168604160L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 
4123168604160L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2581,7 +2581,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); setState(284); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2861,7 +2861,7 @@ public final ExpressionContext expression() throws RecognitionException { noncondexpression(0); setState(320); _la = _input.LA(1); - if (!((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0)) { + if (!(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -3938,7 +3938,7 @@ public final PrimaryContext primary() throws RecognitionException { enterOuterAlt(_localctx, 2); { setState(400); _la = _input.LA(1); - if (!((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0)) { + if (!(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -4564,8 +4564,8 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(469); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(461); expression(); @@ -4923,8 +4923,8 @@ public final ArgumentsContext arguments() throws RecognitionException { setState(524); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(516); argument(); @@ -5104,7 +5104,7 @@ public final LambdaContext lambda() throws RecognitionException { setState(543); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(535); lamtype(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e79eda975f417..e155a890c03d1 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -2189,6 +2189,7 @@ private void generateBridgeMethod(PainlessClassBuilder painlessClassBuilder, Pai bridgeClassWriter.visitEnd(); try { + @SuppressWarnings("removal") BridgeLoader bridgeLoader = AccessController.doPrivileged(new PrivilegedAction<BridgeLoader>() { @Override public BridgeLoader run() { diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java 
b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java index 3418bcf01e19f..691e84176dce3 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java @@ -48,6 +48,7 @@ public class DocFieldsPhaseTests extends ScriptTestCase { PainlessLookup lookup = PainlessLookupBuilder.buildFromAllowlists(Allowlist.BASE_ALLOWLISTS); + @SuppressWarnings("removal") ScriptScope compile(String script) { Compiler compiler = new Compiler( MockDocTestScript.CONTEXT.instanceClazz, diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java index 5fc0a202ae45e..ab74463382aaa 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java @@ -41,7 +41,6 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testChildrenAggs() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(matchQuery("randomized", true)) diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java index 04703a65aa19d..4a6157e388777 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -76,11 +75,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleParentAgg() throws Exception { final SearchRequestBuilder searchRequest = client().prepareSearch("test") .setSize(10000) diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index c43d6352b26f8..99527c3273c4b 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ 
b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -44,7 +44,6 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.IdsQueryBuilder; @@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testMultiLevelChild() throws Exception { assertAcked( prepareCreate("test").setMapping( diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java index 39da86c7fd726..4b5470d17c100 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { ArrayList<Class<? 
extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java index 8c19c0aafe763..9c0f96cf382a6 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java @@ -41,7 +41,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -51,10 +51,10 @@ import java.util.Map; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public abstract class ParentChildTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class ParentChildTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ParentChildTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public ParentChildTestCase(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index c8763c2f3f749..01436404e8a85 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; @@ -57,7 +56,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -90,10 +89,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; -public class PercolatorQuerySearchIT extends ParameterizedOpenSearchIntegTestCase { +public class PercolatorQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public PercolatorQuerySearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public PercolatorQuerySearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -104,11 +103,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean addMockGeoShapeFieldMapper() { return false; diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java 
b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index cdc3cac1a1f06..488c2e33648e7 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -47,7 +46,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -62,14 +61,14 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -public class RankEvalRequestIT extends ParameterizedOpenSearchIntegTestCase { +public class RankEvalRequestIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; - public RankEvalRequestIT(Settings dynamicSettings) { - super(dynamicSettings); + public RankEvalRequestIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(RankEvalModulePlugin.class); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java index c211f937c1dd9..aa48da4cb2421 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java @@ -132,6 +132,8 @@ public List<Setting<?>> getSettings() { final List<Setting<?>> settings = new ArrayList<>(); settings.add(TransportReindexAction.REMOTE_CLUSTER_WHITELIST); settings.add(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST); + settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_INITIAL_BACKOFF); + settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_MAX_COUNT); settings.addAll(ReindexSslConfig.getSettings()); return settings; } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 7181a512428ab..c553effc65ab5 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -54,6 +54,7 @@ import org.opensearch.client.RestClient; import org.opensearch.client.RestClientBuilder; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; @@ -141,7 +142,8 @@ public void execute(BulkByScrollTask task, ReindexRequest request, ActionListene ParentTaskAssigningClient assigningClient = new ParentTaskAssigningClient(client, clusterService.localNode(), task); AsyncIndexBySearchAction searchAction = new AsyncIndexBySearchAction( task, - logger, + // Use a prefix-based logger (tagged with the destination index) to distinguish multiple reindex jobs for easier debugging.
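+    // For illustration (index name hypothetical, not part of the change): Loggers.getLogger(Reindexer.class, "books-v2")
+    // returns a prefix logger, so a statement such as logger.info("starting scroll") is emitted
+    // roughly as "[books-v2] starting scroll", letting operators filter logs per destination index.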
+ Loggers.getLogger(Reindexer.class, String.valueOf(request.getDestination().index())), assigningClient, threadPool, scriptService, diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java index e624b0619a26e..c9a970a4118b3 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java @@ -42,6 +42,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.index.reindex.spi.RemoteReindexExtension; import org.opensearch.script.ScriptService; @@ -71,11 +72,32 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques Function.identity(), Property.NodeScope ); + + public static final Setting<TimeValue> REMOTE_REINDEX_RETRY_INITIAL_BACKOFF = Setting.timeSetting( + "reindex.remote.retry.initial_backoff", + TimeValue.timeValueMillis(500), + TimeValue.timeValueMillis(50), + TimeValue.timeValueMillis(5000), + Property.Dynamic, + Property.NodeScope + ); + + public static final Setting<Integer> REMOTE_REINDEX_RETRY_MAX_COUNT = Setting.intSetting( + "reindex.remote.retry.max_count", + 15, + 1, + 100, + Property.Dynamic, + Property.NodeScope + ); + public static Optional<RemoteReindexExtension> remoteExtension = Optional.empty(); private final ReindexValidator reindexValidator; private final Reindexer reindexer; + private final ClusterService clusterService; + @Inject public TransportReindexAction( Settings settings, @@ -92,10 +114,16 @@ public TransportReindexAction( super(ReindexAction.NAME, transportService, actionFilters, ReindexRequest::new); this.reindexValidator = new ReindexValidator(settings, clusterService, indexNameExpressionResolver, autoCreateIndex); this.reindexer = new Reindexer(clusterService, client, threadPool, scriptService, sslConfig, remoteExtension); + this.clusterService = clusterService; } @Override protected void doExecute(Task task, ReindexRequest request, ActionListener<BulkByScrollResponse> listener) { + if (request.getRemoteInfo() != null) { + request.setMaxRetries(clusterService.getClusterSettings().get(REMOTE_REINDEX_RETRY_MAX_COUNT)); + request.setRetryBackoffInitialTime(clusterService.getClusterSettings().get(REMOTE_REINDEX_RETRY_INITIAL_BACKOFF)); + } + reindexValidator.initialValidation(request); BulkByScrollTask bulkByScrollTask = (BulkByScrollTask) task; reindexer.initTask(bulkByScrollTask, request, new ActionListener<Void>() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java index 5b95ed4b9915b..accaa28283abd 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -61,11 +61,14 @@ import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.reindex.RejectAwareActionListener; +import org.opensearch.index.reindex.RetryListener; import 
org.opensearch.index.reindex.ScrollableHitSource; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; +import java.net.ConnectException; +import java.util.Arrays; import java.util.function.BiFunction; import java.util.function.Consumer; @@ -99,21 +102,29 @@ public RemoteScrollableHitSource( @Override protected void doStart(RejectAwareActionListener<Response> searchListener) { - lookupRemoteVersion(RejectAwareActionListener.withResponseHandler(searchListener, version -> { + logger.info("Starting remote reindex for {}", Arrays.toString(searchRequest.indices())); + lookupRemoteVersion(RejectAwareActionListener.wrap(version -> { remoteVersion = version; - execute( + logger.trace("Starting initial search"); + executeWithRetries( RemoteRequestBuilders.initialSearch(searchRequest, query, remoteVersion), RESPONSE_PARSER, RejectAwareActionListener.withResponseHandler(searchListener, r -> onStartResponse(searchListener, r)) ); - })); + // Skip searchListener::onRejection (used for retries) for the remote source, since retries are configured at the + // request (scroll) level. + }, searchListener::onFailure, searchListener::onFailure)); } void lookupRemoteVersion(RejectAwareActionListener<Version> listener) { + logger.trace("Checking version of the remote cluster"); + // Skip retries for the first call to the remote cluster so that we fail fast and respond immediately + // instead of retrying for a longer duration. execute(new Request("GET", ""), MAIN_ACTION_PARSER, listener); } private void onStartResponse(RejectAwareActionListener<Response> searchListener, Response response) { + logger.trace("On initial search response"); if (Strings.hasLength(response.getScrollId()) && response.getHits().isEmpty()) { logger.debug("First response looks like a scan response. Jumping right to the second. scroll=[{}]", response.getScrollId()); doStartNextScroll(response.getScrollId(), timeValueMillis(0), searchListener); @@ -124,12 +135,14 @@ private void onStartResponse(RejectAwareActionListener<Response> searchListener, @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener<Response> searchListener) { + logger.trace("Starting next scroll call"); TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()); - execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); + executeWithRetries(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); } @Override protected void clearScroll(String scrollId, Runnable onCompletion) { + logger.debug("Clearing the scroll ID {}", scrollId); client.performRequestAsync(RemoteRequestBuilders.clearScroll(scrollId, remoteVersion), new ResponseListener() { @Override public void onSuccess(org.opensearch.client.Response response) { @@ -180,17 +193,31 @@ protected void cleanup(Runnable onCompletion) { }); } + private void executeWithRetries( + Request request, + BiFunction<XContentParser, MediaType, Response> parser, + RejectAwareActionListener<Response> childListener + ) { + execute(request, parser, new RetryListener(logger, threadPool, backoffPolicy, r -> { + logger.debug("Retrying execute request {}", request.getEndpoint()); + countSearchRetry.run(); + execute(request, parser, r); + }, childListener)); + } + private <T> void execute( Request request, BiFunction<XContentParser, MediaType, T> parser, RejectAwareActionListener<?
super T> listener ) { + logger.trace("Executing http request to remote cluster {}", request.getEndpoint()); // Preserve the thread context so headers survive after the call java.util.function.Supplier<ThreadContext.StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(true); try { client.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(org.opensearch.client.Response response) { + logger.trace("Successfully got response from the remote"); // Restore the thread context to get the precious headers try (ThreadContext.StoredContext ctx = contextSupplier.get()) { assert ctx != null; // eliminates compiler warning @@ -205,7 +232,7 @@ public void onSuccess(org.opensearch.client.Response response) { } if (mediaType == null) { try { - logger.debug("Response didn't include Content-Type: " + bodyMessage(response.getEntity())); + logger.error("Response didn't include Content-Type: " + bodyMessage(response.getEntity())); throw new OpenSearchException( "Response didn't include supported Content-Type, remote is likely not an OpenSearch instance" ); @@ -237,22 +264,28 @@ public void onSuccess(org.opensearch.client.Response response) { public void onFailure(Exception e) { try (ThreadContext.StoredContext ctx = contextSupplier.get()) { assert ctx != null; // eliminates compiler warning + logger.debug("Received response failure {}", e.getMessage()); if (e instanceof ResponseException) { ResponseException re = (ResponseException) e; int statusCode = re.getResponse().getStatusLine().getStatusCode(); e = wrapExceptionToPreserveStatus(statusCode, re.getResponse().getEntity(), re); - if (RestStatus.TOO_MANY_REQUESTS.getStatus() == statusCode) { + // retry all 5xx & 429s. + if (RestStatus.TOO_MANY_REQUESTS.getStatus() == statusCode + || statusCode >= RestStatus.INTERNAL_SERVER_ERROR.getStatus()) { listener.onRejection(e); return; } + } else if (e instanceof ConnectException) { + listener.onRejection(e); + return; } else if (e instanceof ContentTooLongException) { e = new IllegalArgumentException( "Remote responded with a chunk that was too large. 
Use a smaller batch size.", e ); } - listener.onFailure(e); } + listener.onFailure(e); } }); } catch (Exception e) { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index f15d0a3c23a5e..8aa66fc3cfd8c 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -42,9 +42,12 @@ import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.HttpEntity; import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.hc.core5.http.nio.AsyncPushConsumer; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; @@ -57,6 +60,7 @@ import org.opensearch.Version; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.http.HttpUriRequestProducer; import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; @@ -83,6 +87,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.UncheckedIOException; +import java.net.ConnectException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Queue; @@ -90,10 +95,13 @@ import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Stream; +import org.mockito.Mockito; + import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.common.unit.TimeValue.timeValueMinutes; import static org.hamcrest.Matchers.empty; @@ -515,7 +523,7 @@ public void testInvalidJsonThinksRemoteIsNotES() throws IOException { Exception e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall("some_text.txt").start()); assertEquals( "Error parsing the response, remote is likely not an OpenSearch instance", - e.getCause().getCause().getCause().getMessage() + e.getCause().getCause().getCause().getCause().getMessage() ); } @@ -524,7 +532,7 @@ public void testUnexpectedJsonThinksRemoteIsNotES() throws IOException { Exception e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall("main/2_3_3.json").start()); assertEquals( "Error parsing the response, remote is likely not an OpenSearch instance", - e.getCause().getCause().getCause().getMessage() + e.getCause().getCause().getCause().getCause().getMessage() ); } @@ -702,4 +710,105 @@ private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProduce assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); return ((HttpUriRequestProducer) requestProducer).getRequest(); } + + RemoteScrollableHitSource createRemoteSourceWithFailure( + boolean shouldMockRemoteVersion, + 
Exception failure, + AtomicInteger invocationCount + ) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected <T> Future<T> doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer<T> responseConsumer, + HandlerFactory<AsyncPushConsumer> pushHandlerFactory, + HttpContext context, + FutureCallback<T> callback + ) { + invocationCount.getAndIncrement(); + callback.failed(failure); + return null; + } + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + return sourceWithMockedClient(shouldMockRemoteVersion, httpClient); + } + + void verifyRetries(boolean shouldMockRemoteVersion, Exception failureResponse, boolean expectedToRetry) { + retriesAllowed = 5; + AtomicInteger invocations = new AtomicInteger(); + invocations.set(0); + RemoteScrollableHitSource source = createRemoteSourceWithFailure(shouldMockRemoteVersion, failureResponse, invocations); + + Throwable e = expectThrows(RuntimeException.class, source::start); + int expectedInvocations = 0; + if (shouldMockRemoteVersion) { + expectedInvocations += 1; // first search + if (expectedToRetry) expectedInvocations += retriesAllowed; + } else { + expectedInvocations = 1; // the first call should fail and not trigger any retry. + } + + assertEquals(expectedInvocations, invocations.get()); + + // Unwrap the exception wrappers added by the test harness + while (e.getMessage().equals("failed")) { + e = e.getCause(); + } + // There is an additional wrapper for ResponseException. + if (failureResponse instanceof ResponseException) { + e = e.getCause(); + } + + assertSame(failureResponse, e); + } + + ResponseException withResponseCode(int statusCode, String errorMsg) throws IOException { + org.opensearch.client.Response mockResponse = Mockito.mock(org.opensearch.client.Response.class); + Mockito.when(mockResponse.getEntity()).thenReturn(new StringEntity(errorMsg, ContentType.TEXT_PLAIN)); + Mockito.when(mockResponse.getStatusLine()).thenReturn(new StatusLine(new BasicClassicHttpResponse(statusCode, errorMsg))); + Mockito.when(mockResponse.getRequestLine()).thenReturn(new RequestLine("GET", "/", new ProtocolVersion("https", 1, 1))); + return new ResponseException(mockResponse); + } + + public void testRetryOnCallFailure() throws Exception { + // First call succeeds. Search calls failing with 5xxs and 429s should be retried but not 400s. + verifyRetries(true, withResponseCode(500, "Internal Server Error"), true); + verifyRetries(true, withResponseCode(429, "Too many requests"), true); + verifyRetries(true, withResponseCode(400, "Client Error"), false); + + // First call succeeds. Search calls fail with exceptions other than ResponseException. + verifyRetries(true, new ConnectException("blah"), true); // should retry connect exceptions.
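+        // The retry budget exercised by these cases comes from the dynamic settings registered in
+        // TransportReindexAction. For illustration (values assumed, within the declared bounds of
+        // 50ms-5s and 1-100), an operator could tune them via a cluster settings update:
+        //   client.admin().cluster().prepareUpdateSettings()
+        //       .setPersistentSettings(Settings.builder()
+        //           .put("reindex.remote.retry.initial_backoff", "1s") // default 500ms
+        //           .put("reindex.remote.retry.max_count", 30)) // default 15
+        //       .get();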
+ verifyRetries(true, new RuntimeException("foobar"), false); + + // First call (remote version lookup) fails, so no retries are expected + verifyRetries(false, withResponseCode(500, "Internal Server Error"), false); + verifyRetries(false, withResponseCode(429, "Too many requests"), false); + verifyRetries(false, withResponseCode(400, "Client Error"), false); + verifyRetries(false, new ConnectException("blah"), false); + } } diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java index b13a4d5a39a5b..02e858cb8d1f2 100644 --- a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java @@ -157,6 +157,7 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS throw new UnsupportedOperationException("URL repository doesn't support this operation"); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") private static InputStream getInputStream(URL url) throws IOException { try { diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java index 93e2e28718d51..05c6222d3d89a 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java @@ -40,6 +40,7 @@ /** * Provides access to the native method sd_notify from libsystemd. */ +@SuppressWarnings("removal") class Libsystemd { static { diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 ---
a/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null 
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..0643f16dc1052 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +44a4e095d7e047a9452d81b224905b72c830f8ae \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 deleted file mode 100644 index dde9b7c100dc7..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -147cb42a90a29501d9ca6094ea0db1d213f3076a \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..3f4d49a78791b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7f57fe7322e6d3a9b4edcc3da0b1ee0791a814ec \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 deleted file mode 100644 index b70a22e9db096..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b034dd3a975763e083c7e11b5d0f7d516ab72590 \ No newline at end of file diff --git 
a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java new file mode 100644 index 0000000000000..314daab1801a6 --- /dev/null +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.analysis; + +import org.apache.lucene.analysis.ja.JapaneseCompletionAnalyzer; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter; +import org.apache.lucene.analysis.ja.dict.UserDictionary; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; + +public class KuromojiCompletionAnalyzerProvider extends AbstractIndexAnalyzerProvider<JapaneseCompletionAnalyzer> { + + private final JapaneseCompletionAnalyzer analyzer; + + public KuromojiCompletionAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + final JapaneseCompletionFilter.Mode mode = KuromojiCompletionFilterFactory.getMode(settings); + final UserDictionary userDictionary = KuromojiTokenizerFactory.getUserDictionary(env, settings); + analyzer = new JapaneseCompletionAnalyzer(userDictionary, mode); + } + + @Override + public JapaneseCompletionAnalyzer get() { + return this.analyzer; + } + +} diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java new file mode 100644 index 0000000000000..1459c19de46db --- /dev/null +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.analysis; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter.Mode; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; + +public class KuromojiCompletionFilterFactory extends AbstractTokenFilterFactory { + private final Mode mode; + + public KuromojiCompletionFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + this.mode = getMode(settings); + } + + public static Mode getMode(Settings settings) { + String modeSetting = settings.get("mode", null); + if (modeSetting != null) { + if ("index".equalsIgnoreCase(modeSetting)) { + return Mode.INDEX; + } else if ("query".equalsIgnoreCase(modeSetting)) { + return Mode.QUERY; + } + } + return Mode.INDEX; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new JapaneseCompletionFilter(tokenStream, mode); + } +} diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java index 76d3df8c2e76c..c429e8e4dd830 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java @@ -38,6 +38,8 @@ import org.opensearch.index.analysis.JapaneseStopTokenFilterFactory; import org.opensearch.index.analysis.KuromojiAnalyzerProvider; import org.opensearch.index.analysis.KuromojiBaseFormFilterFactory; +import org.opensearch.index.analysis.KuromojiCompletionAnalyzerProvider; +import org.opensearch.index.analysis.KuromojiCompletionFilterFactory; import org.opensearch.index.analysis.KuromojiIterationMarkCharFilterFactory; import org.opensearch.index.analysis.KuromojiKatakanaStemmerFactory; import org.opensearch.index.analysis.KuromojiNumberFilterFactory; @@ -70,6 +72,7 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { extra.put("kuromoji_stemmer", KuromojiKatakanaStemmerFactory::new); extra.put("ja_stop", JapaneseStopTokenFilterFactory::new); extra.put("kuromoji_number", KuromojiNumberFilterFactory::new); + extra.put("kuromoji_completion", KuromojiCompletionFilterFactory::new); return extra; } @@ -80,6 +83,9 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { @Override public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() { - return singletonMap("kuromoji", KuromojiAnalyzerProvider::new); + Map<String, AnalysisProvider<AnalyzerProvider<? 
extends Analyzer>>> extra = new HashMap<>(); + extra.put("kuromoji", KuromojiAnalyzerProvider::new); + extra.put("kuromoji_completion", KuromojiCompletionAnalyzerProvider::new); + return extra; } } diff --git a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java index a76406d4dc925..b6b953f9ba417 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java @@ -59,6 +59,7 @@ protected Map<String, Class<?>> getTokenFilters() { filters.put("japanesereadingform", KuromojiReadingFormFilterFactory.class); filters.put("japanesekatakanastem", KuromojiKatakanaStemmerFactory.class); filters.put("japanesenumber", KuromojiNumberFilterFactory.class); + filters.put("japanesecompletion", KuromojiCompletionFilterFactory.class); return filters; } diff --git a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java index ffc2db6672899..ec18041f451fc 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseAnalyzer; +import org.apache.lucene.analysis.ja.JapaneseCompletionAnalyzer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.opensearch.Version; @@ -85,6 +86,15 @@ public void testDefaultsKuromojiAnalysis() throws IOException { filterFactory = analysis.tokenFilter.get("kuromoji_number"); assertThat(filterFactory, instanceOf(KuromojiNumberFilterFactory.class)); + filterFactory = analysis.tokenFilter.get("kuromoji_completion"); + assertThat(filterFactory, instanceOf(KuromojiCompletionFilterFactory.class)); + + filterFactory = analysis.tokenFilter.get("kuromoji_completion_index"); + assertThat(filterFactory, instanceOf(KuromojiCompletionFilterFactory.class)); + + filterFactory = analysis.tokenFilter.get("kuromoji_completion_query"); + assertThat(filterFactory, instanceOf(KuromojiCompletionFilterFactory.class)); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji"); assertThat(analyzer.analyzer(), instanceOf(JapaneseAnalyzer.class)); @@ -93,6 +103,15 @@ public void testDefaultsKuromojiAnalysis() throws IOException { assertThat(analyzer.analyzer(), instanceOf(CustomAnalyzer.class)); assertThat(analyzer.analyzer().tokenStream(null, new StringReader("")), instanceOf(JapaneseTokenizer.class)); + analyzer = indexAnalyzers.get("kuromoji_completion"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + + analyzer = indexAnalyzers.get("kuromoji_completion_index"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + + analyzer = indexAnalyzers.get("kuromoji_completion_query"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_iteration_mark"); 
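+        // For illustration (analyzer name hypothetical, not part of the change): an index can opt into
+        // the newly registered analyzer through its analysis settings, where "mode" accepts "index"
+        // (the default) or "query" per KuromojiCompletionFilterFactory:
+        //   Settings.builder()
+        //       .put("index.analysis.analyzer.ja_suggest.type", "kuromoji_completion")
+        //       .put("index.analysis.analyzer.ja_suggest.mode", "query")
+        //       .build();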
assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class)); @@ -199,6 +218,32 @@ public void testKatakanaStemFilter() throws IOException { assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana); } + public void testJapaneseCompletionFilter() throws IOException { + TestAnalysis analysis = createTestAnalysis(); + + String source = "寿司がおいしいね"; + String[] expected_tokens = new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }; + + // mode = INDEX(default) + Tokenizer tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_completion"); + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + + // mode = INDEX + tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + tokenFilter = analysis.tokenFilter.get("kuromoji_completion_index"); + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + + // mode = QUERY + tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + tokenFilter = analysis.tokenFilter.get("kuromoji_completion_query"); + expected_tokens = new String[] { "寿司", "susi", "sushi", "がおいしいね", "gaoisiine", "gaoishiine" }; + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + } + public void testIterationMarkCharFilter() throws IOException { TestAnalysis analysis = createTestAnalysis(); // test only kanji @@ -414,6 +459,30 @@ public void testDiscardCompoundToken() throws Exception { assertSimpleTSOutput(tokenizer, expected); } + public void testJapaneseCompletionAnalyzer() throws Exception { + TestAnalysis analysis = createTestAnalysis(); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; + NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji_completion"); + + // mode = INDEX(default) + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }); + } + + // mode = INDEX + analyzer = indexAnalyzers.get("kuromoji_completion_index"); + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }); + } + + // mode = QUERY + analyzer = indexAnalyzers.get("kuromoji_completion_query"); + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "がおいしいね", "gaoisiine", "gaoishiine" }); + } + + } + private TestAnalysis createTestAnalysis(Settings analysisSettings) throws IOException { InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt"); Path home = createTempDir(); diff --git a/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json b/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json index a55947f53e34b..3e952b51e4ece 100644 --- a/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json +++ b/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json @@ -17,6 +17,14 @@ "ja_stop" : { "type": "ja_stop", "stopwords": ["_japanese_", "スピード"] + 
}, + "kuromoji_completion_index" : { + "type" : "kuromoji_completion", + "mode" : "index" + }, + "kuromoji_completion_query" : { + "type" : "kuromoji_completion", + "mode" : "query" } }, @@ -70,6 +78,14 @@ "my_analyzer" : { "type" : "custom", "tokenizer" : "kuromoji_tokenizer" + }, + "kuromoji_completion_index" : { + "type" : "kuromoji_completion", + "mode" : "index" + }, + "kuromoji_completion_query" : { + "type" : "kuromoji_completion", + "mode" : "query" } } diff --git a/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml b/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml index 1cca2b728e0aa..3363591ded5ca 100644 --- a/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml +++ b/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml @@ -16,6 +16,24 @@ - match: { tokens.5.token: 飲む } - match: { tokens.6.token: 行く } --- +"Completion Analyzer": + - do: + indices.analyze: + body: + text: 寿司がおいしいね + analyzer: kuromoji_completion + - length: { tokens: 10 } + - match: { tokens.0.token: "寿司" } + - match: { tokens.1.token: "susi" } + - match: { tokens.2.token: "sushi" } + - match: { tokens.3.token: "が" } + - match: { tokens.4.token: "ga" } + - match: { tokens.5.token: "おいしい" } + - match: { tokens.6.token: "oisii" } + - match: { tokens.7.token: "oishii" } + - match: { tokens.8.token: "ね" } + - match: { tokens.9.token: "ne" } +--- "Tokenizer": - do: indices.analyze: @@ -57,3 +75,15 @@ filter: [kuromoji_stemmer] - length: { tokens: 1 } - match: { tokens.0.token: サーバ } +--- +"Completion filter": + - do: + indices.analyze: + body: + text: 寿司 + tokenizer: kuromoji_tokenizer + filter: [kuromoji_completion] + - length: { tokens: 3 } + - match: { tokens.0.token: "寿司" } + - match: { tokens.1.token: "susi" } + - match: { tokens.2.token: "sushi" } diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..1f110011ca9c6 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +9929da235100f8df323cfed165b8111fb2840093 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 deleted file mode 100644 index 323f165c62790..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c405f2f7d0fc127d88dfbadd753469b2028fdf52 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..8d6bf9fa0fa1b --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +8564c86d880c6ce002250002e2fd0936cbfff61d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 deleted file mode 100644 index dd659ddf4de95..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -970e5775876c2d7e1b9af7421a4b17d96f63faf4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..cbe4aec98fae4 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7e71777cfb5beb4ffd5b03030576d2f062eef13c \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 deleted file mode 100644 index ed0e81d8f1f75..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2421e5238e9b8484929291744d709dd743c01da1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..e1c7aecc104d0 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a597265bd6fb0a7e954e948a295d31507dd73cce \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 deleted file mode 100644 index fd8e000088180..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a23e7de4cd9ae7af285c89dc1c55e0ac3f157fd3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..eefa2809f3540 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +c9e534845bb08985d7fa21e2e71a14bc68c46089 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 deleted file mode 100644 index d0e7a3b0c751c..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d9bce1ea51db279878c51091dd9aefc7b335da4 \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle new file mode 100644 index 0000000000000..65e7daaaacf26 --- /dev/null +++ b/plugins/cache-ehcache/build.gradle @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +import org.apache.tools.ant.taskdefs.condition.Os +import org.opensearch.gradle.Architecture +import org.opensearch.gradle.OS +import org.opensearch.gradle.info.BuildParams + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'Ehcache based cache implementation.' 
+ classname 'org.opensearch.cache.EhcacheCachePlugin' +} + +versions << [ + 'ehcache' : '3.10.8' +] + +dependencies { + api "org.ehcache:ehcache:${versions.ehcache}" +} + +thirdPartyAudit { + ignoreViolations( + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$CounterCell', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin', + 'org.ehcache.impl.internal.concurrent.ThreadLocalRandomUtil', + 'org.ehcache.sizeof.impl.UnsafeSizeOf' + ) + + ignoreMissingClasses( + 'javax.cache.Cache', + 'javax.cache.Cache$Entry', + 'javax.cache.CacheException', + 'javax.cache.CacheManager', + 'javax.cache.configuration.CacheEntryListenerConfiguration', + 'javax.cache.configuration.CompleteConfiguration', + 'javax.cache.configuration.Configuration', + 'javax.cache.configuration.Factory', + 'javax.cache.configuration.OptionalFeature', + 'javax.cache.event.CacheEntryCreatedListener', + 'javax.cache.event.CacheEntryEvent', + 'javax.cache.event.CacheEntryEventFilter', + 'javax.cache.event.CacheEntryExpiredListener', + 'javax.cache.event.CacheEntryListener', + 'javax.cache.event.CacheEntryRemovedListener', + 'javax.cache.event.CacheEntryUpdatedListener', + 'javax.cache.event.EventType', + 'javax.cache.expiry.Duration', + 'javax.cache.expiry.EternalExpiryPolicy', + 'javax.cache.expiry.ExpiryPolicy', + 'javax.cache.integration.CacheLoader', + 'javax.cache.integration.CacheLoaderException', + 'javax.cache.integration.CacheWriter', + 'javax.cache.integration.CacheWriterException', + 'javax.cache.integration.CompletionListener', + 'javax.cache.management.CacheMXBean', + 'javax.cache.management.CacheStatisticsMXBean', + 'javax.cache.processor.EntryProcessor', + 'javax.cache.processor.EntryProcessorResult', + 'javax.cache.processor.MutableEntry', + 'javax.cache.spi.CachingProvider', + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.Marshaller', + 'javax.xml.bind.Unmarshaller', + 'javax.xml.bind.annotation.XmlElement', + 'javax.xml.bind.annotation.XmlRootElement', + 'javax.xml.bind.annotation.XmlSchema', + 'javax.xml.bind.annotation.adapters.XmlAdapter', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.ServiceReference', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.Marker', + 'org.slf4j.event.Level' + ) +} + +tasks.named("bundlePlugin").configure { + from('config/cache-ehcache') { + into 'config' + } +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. + systemProperty 'tests.security.manager', 'false' +} diff --git a/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 new file mode 100644 index 0000000000000..dee07e9238ebf --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 @@ -0,0 +1 @@ +f0d50ede46609db78413ca7f4250d348a597b101 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
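The cache-ehcache plugin introduced above wraps Ehcache 3's persistent cache manager behind OpenSearch's ICache interface (see the EhcacheDiskCache sources further down). For orientation, here is a minimal standalone sketch of the underlying Ehcache 3 disk-persistence API the plugin builds on; the directory, cache alias, and pool size are illustrative placeholders, not values the plugin uses:

import java.io.File;

import org.ehcache.Cache;
import org.ehcache.PersistentCacheManager;
import org.ehcache.config.builders.CacheConfigurationBuilder;
import org.ehcache.config.builders.CacheManagerBuilder;
import org.ehcache.config.builders.ResourcePoolsBuilder;
import org.ehcache.config.units.MemoryUnit;

public class EhcacheDiskSketch {
    public static void main(String[] args) {
        // A persistent cache manager keeps disk-backed caches under the given directory.
        PersistentCacheManager cacheManager = CacheManagerBuilder.newCacheManagerBuilder()
            .with(CacheManagerBuilder.persistence(new File("/tmp/ehcache-sketch")))
            .build(true); // true = initialize the manager immediately

        // A disk-only cache of up to 10 MB, keyed by String with byte[] values.
        Cache<String, byte[]> cache = cacheManager.createCache(
            "sketch-cache",
            CacheConfigurationBuilder.newCacheConfigurationBuilder(
                String.class,
                byte[].class,
                ResourcePoolsBuilder.newResourcePoolsBuilder().disk(10, MemoryUnit.MB, false)
            )
        );

        cache.put("key", new byte[] { 1, 2, 3 });
        cache.get("key"); // served from the disk tier
        cacheManager.close();
    }
}

On top of this API, the plugin layers write-concurrency and segment tuning (via OffHeapDiskStoreConfiguration), a pooled execution service for disk writes, and per-cache-type settings, as defined in the sources that follow.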
diff --git a/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt new file mode 100644 index 0000000000000..1dbd38242cc98 --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt @@ -0,0 +1,5 @@ +Ehcache V3 +Copyright 2014-2023 Terracotta, Inc. + +The product includes software from the Apache Commons Lang project, +under the Apache License 2.0 (see: org.ehcache.impl.internal.classes.commonslang) diff --git a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java new file mode 100644 index 0000000000000..c68455463ee3d --- /dev/null +++ b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class EhcacheDiskCacheIT extends OpenSearchIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(EhcacheCachePlugin.class); + } + + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.EhcacheCachePlugin")) + ); + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java new file mode 100644 index 0000000000000..ceda96e4a7d7d --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.CACHE_TYPE_MAP; + +/** + * Ehcache based cache plugin. + */ +public class EhcacheCachePlugin extends Plugin implements CachePlugin { + + private static final String EHCACHE_CACHE_PLUGIN = "EhcachePlugin"; + + /** + * Default constructor to avoid javadoc related failures. + */ + public EhcacheCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME, new EhcacheDiskCache.EhcacheDiskCacheFactory()); + } + + @Override + public List<Setting<?>> getSettings() { + List<Setting<?>> settingList = new ArrayList<>(); + for (Map.Entry<CacheType, Map<String, Setting<?>>> entry : CACHE_TYPE_MAP.entrySet()) { + for (Map.Entry<String, Setting<?>> entry1 : entry.getValue().entrySet()) { + settingList.add(entry1.getValue()); + } + } + return settingList; + } + + @Override + public String getName() { + return EHCACHE_CACHE_PLUGIN; + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java new file mode 100644 index 0000000000000..837fd6b268ce6 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -0,0 +1,222 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings related to ehcache disk cache. + */ +public class EhcacheDiskCacheSettings { + + /** + * Ehcache disk write minimum threads for its pool + * + * Setting pattern: {cache_type}.ehcache_disk.min_threads + */ + + public static final Setting.AffixSetting<Integer> DISK_WRITE_MINIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".min_threads", + (key) -> Setting.intSetting(key, 2, 1, 5, NodeScope) + ); + + /** + * Ehcache disk write maximum threads for its pool + * + * Setting pattern: {cache_type}.ehcache_disk.max_threads + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_MAXIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_threads", + (key) -> Setting.intSetting(key, 2, 1, 20, NodeScope) + ); + + /** + * Not to be confused with the number of disk segments; this is different. Defines + distinct write queues created for disk store where a group of segments share a write queue.
This is + implemented with ehcache using a partitioned thread pool executor. By default all segments share a single write + queue, i.e. write concurrency is 1. Check OffHeapDiskStoreConfiguration and DiskWriteThreadPool. + * + * Default is 1 within ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_CONCURRENCY_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".concurrency", + (key) -> Setting.intSetting(key, 1, 1, 3, NodeScope) + ); + + /** + * Defines how many segments the disk cache is separated into. A higher number achieves greater concurrency but + * will hold that many file pointers. + * + * Default value is 16 within Ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_SEGMENTS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".segments", + (key) -> Setting.intSetting(key, 16, 1, 32, NodeScope) + ); + + /** + * Storage path for disk cache. + */ + public static final Setting.AffixSetting<String> DISK_STORAGE_PATH_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".storage.path", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache alias. + */ + public static final Setting.AffixSetting<String> DISK_CACHE_ALIAS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".alias", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache expire after access setting. + */ + public static final Setting.AffixSetting<TimeValue> DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".expire_after_access", + (key) -> Setting.positiveTimeSetting(key, TimeValue.MAX_VALUE, NodeScope) + ); + + /** + * Disk cache max size setting. + */ + public static final Setting.AffixSetting<Long> DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes", + (key) -> Setting.longSetting(key, 1073741824L, NodeScope) + ); + + /** + * Disk cache listener mode setting. + */ + public static final Setting.AffixSetting<Boolean> DISK_CACHE_LISTENER_MODE_SYNC_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".is_event_listener_sync", + (key) -> Setting.boolSetting(key, false, NodeScope) + ); + + /** + * Key for disk segment. + */ + public static final String DISK_SEGMENT_KEY = "disk_segment"; + /** + * Key for max size. + */ + public static final String DISK_MAX_SIZE_IN_BYTES_KEY = "max_size_in_bytes"; + /** + * Key for expire after access. + */ + public static final String DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY = "disk_cache_expire_after_access_key"; + /** + * Key for cache alias. + */ + public static final String DISK_CACHE_ALIAS_KEY = "disk_cache_alias"; + /** + * Key for disk segments. + */ + public static final String DISK_SEGMENTS_KEY = "disk_segments"; + /** + * Key for disk write concurrency. + */ + public static final String DISK_WRITE_CONCURRENCY_KEY = "disk_write_concurrency"; + /** + * Key for max threads. + */ + public static final String DISK_WRITE_MAXIMUM_THREADS_KEY = "disk_write_max_threads"; + /** + * Key for min threads. + */ + public static final String DISK_WRITE_MIN_THREADS_KEY = "disk_write_min_threads"; + /** + * Key for storage path.
+ */ + public static final String DISK_STORAGE_PATH_KEY = "disk_storage_path"; + /** + * Key for listener mode. + */ + public static final String DISK_LISTENER_MODE_SYNC_KEY = "disk_listener_mode"; + + /** + * Map of key to setting. + */ + private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of( + DISK_SEGMENT_KEY, + DISK_SEGMENTS_SETTING, + DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY, + DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING, + DISK_CACHE_ALIAS_KEY, + DISK_CACHE_ALIAS_SETTING, + DISK_WRITE_CONCURRENCY_KEY, + DISK_WRITE_CONCURRENCY_SETTING, + DISK_WRITE_MAXIMUM_THREADS_KEY, + DISK_WRITE_MAXIMUM_THREADS_SETTING, + DISK_WRITE_MIN_THREADS_KEY, + DISK_WRITE_MINIMUM_THREADS_SETTING, + DISK_STORAGE_PATH_KEY, + DISK_STORAGE_PATH_SETTING, + DISK_MAX_SIZE_IN_BYTES_KEY, + DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING, + DISK_LISTENER_MODE_SYNC_KEY, + DISK_CACHE_LISTENER_MODE_SYNC_SETTING + ); + + /** + * Map to store desired settings for a cache type. + */ + public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap(); + + /** + * Used to form concrete settings for cache types and return the desired map. + * @return map of cacheType and associated settings. + */ + private static final Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() { + Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + Map<String, Setting<?>> settingMap = new HashMap<>(); + for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) { + settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix())); + } + cacheTypeMap.put(cacheType, settingMap); + } + return cacheTypeMap; + } + + /** + * Fetches the settings map for the given cache type and the ehcache disk store. + * @param cacheType cache type + * @return settings + */ + public static final Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) { + Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType); + if (cacheTypeSettings == null) { + throw new IllegalArgumentException( + "No settings exist for cache store name: " + + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + + " associated with " + + "cache type: " + + cacheType + ); + } + return cacheTypeSettings; + } + + /** + * Default constructor. Added to fix javadocs. + */ + public EhcacheDiskCacheSettings() {} +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java similarity index 66% rename from server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java rename to plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java index c3222ca3ffb62..f9be1c3dbf826 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java @@ -6,5 +6,5 @@ * compatible open source license.
*/ -/** Package related to tiered cache listeners */ -package org.opensearch.common.cache.store.listeners; +/** Base package for cache plugin */ +package org.opensearch.cache; diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java new file mode 100644 index 0000000000000..edb2c900be46c --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -0,0 +1,790 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.store.disk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.Arrays; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +import org.ehcache.Cache; +import org.ehcache.CachePersistenceException; +import org.ehcache.PersistentCacheManager; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.PooledExecutionServiceConfigurationBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.core.spi.service.FileBasedPersistenceContext; +import org.ehcache.event.CacheEvent; +import org.ehcache.event.CacheEventListener; +import org.ehcache.event.EventType; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.impl.config.store.disk.OffHeapDiskStoreConfiguration; +import org.ehcache.spi.loaderwriter.CacheLoadingException; +import org.ehcache.spi.loaderwriter.CacheWritingException; +import org.ehcache.spi.serialization.SerializerException; + +import static 
org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_ALIAS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_SEGMENT_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_CONCURRENCY_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MAXIMUM_THREADS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MIN_THREADS_KEY; + +/** + * This variant of disk cache uses Ehcache underneath. + * @param <K> Type of key. + * @param <V> Type of value. + * + * @opensearch.experimental + * + */ +@ExperimentalApi +public class EhcacheDiskCache<K, V> implements ICache<K, V> { + + private static final Logger logger = LogManager.getLogger(EhcacheDiskCache.class); + + // Unique id associated with this cache. + private final static String UNIQUE_ID = UUID.randomUUID().toString(); + private final static String THREAD_POOL_ALIAS_PREFIX = "ehcachePool"; + private final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB + + // A Cache manager can create many caches. + private final PersistentCacheManager cacheManager; + + // Disk cache. Using ByteArrayWrapper to compare two byte[] by values rather than the default reference checks + private Cache<K, ByteArrayWrapper> cache; + private final long maxWeightInBytes; + private final String storagePath; + private final Class<K> keyType; + private final Class<V> valueType; + private final TimeValue expireAfterAccess; + private final EhCacheEventListener ehCacheEventListener; + private final String threadPoolAlias; + private final Settings settings; + private final RemovalListener<K, V> removalListener; + private final CacheType cacheType; + private final String diskCacheAlias; + // TODO: Move count to stats once those changes are ready. + private final CounterMetric entries = new CounterMetric(); + + private final Serializer<K, byte[]> keySerializer; + private final Serializer<V, byte[]> valueSerializer; + + /** + * Used in computeIfAbsent to synchronize loading of a given key. This is needed as ehcache doesn't provide a + * computeIfAbsent method. 
+ */ + Map<K, CompletableFuture<Tuple<K, V>>> completableFutureMap = new ConcurrentHashMap<>(); + + private EhcacheDiskCache(Builder<K, V> builder) { + this.keyType = Objects.requireNonNull(builder.keyType, "Key type shouldn't be null"); + this.valueType = Objects.requireNonNull(builder.valueType, "Value type shouldn't be null"); + this.expireAfterAccess = Objects.requireNonNull(builder.getExpireAfterAcess(), "ExpireAfterAccess value shouldn't " + "be null"); + this.maxWeightInBytes = builder.getMaxWeightInBytes(); + if (this.maxWeightInBytes <= MINIMUM_MAX_SIZE_IN_BYTES) { + throw new IllegalArgumentException("Ehcache Disk tier cache size should be greater than " + MINIMUM_MAX_SIZE_IN_BYTES); + } + this.cacheType = Objects.requireNonNull(builder.cacheType, "Cache type shouldn't be null"); + if (builder.diskCacheAlias == null || builder.diskCacheAlias.isBlank()) { + this.diskCacheAlias = "ehcacheDiskCache#" + this.cacheType; + } else { + this.diskCacheAlias = builder.diskCacheAlias; + } + this.storagePath = builder.storagePath; + if (this.storagePath == null || this.storagePath.isBlank()) { + throw new IllegalArgumentException("Storage path shouldn't be null or empty"); + } + if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { + this.threadPoolAlias = THREAD_POOL_ALIAS_PREFIX + "DiskWrite#" + UNIQUE_ID; + } else { + this.threadPoolAlias = builder.threadPoolAlias; + } + this.settings = Objects.requireNonNull(builder.getSettings(), "Settings objects shouldn't be null"); + this.keySerializer = Objects.requireNonNull(builder.keySerializer, "Key serializer shouldn't be null"); + this.valueSerializer = Objects.requireNonNull(builder.valueSerializer, "Value serializer shouldn't be null"); + this.cacheManager = buildCacheManager(); + Objects.requireNonNull(builder.getRemovalListener(), "Removal listener can't be null"); + this.removalListener = builder.getRemovalListener(); + this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener()); + this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); + } + + private Cache<K, ByteArrayWrapper> buildCache(Duration expireAfterAccess, Builder<K, V> builder) { + try { + return this.cacheManager.createCache( + this.diskCacheAlias, + CacheConfigurationBuilder.newCacheConfigurationBuilder( + this.keyType, + ByteArrayWrapper.class, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(K key, ByteArrayWrapper value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(K key, Supplier<? extends ByteArrayWrapper> value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate(K key, Supplier<? extends ByteArrayWrapper> oldValue, ByteArrayWrapper newValue) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_CONCURRENCY_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + ) + ) + .withKeySerializer(new KeySerializerWrapper<K>(keySerializer)) + .withValueSerializer(new ByteArrayWrapperSerializer()) + // We pass ByteArrayWrapperSerializer as ehcache's value serializer. 
If V is an interface, and we pass its + // serializer directly to ehcache, ehcache requires the classes match exactly before/after serialization. + // This is not always feasible or necessary, like for BytesReference. So, we handle the value serialization + // before V hits ehcache. + ); + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } + } + + private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder<K, V> builder) { + CacheEventListenerConfigurationBuilder configurationBuilder = CacheEventListenerConfigurationBuilder.newEventListenerConfiguration( + this.ehCacheEventListener, + EventType.EVICTED, + EventType.EXPIRED, + EventType.REMOVED, + EventType.UPDATED, + EventType.CREATED + ).unordered(); + if (builder.isEventListenerModeSync) { + return configurationBuilder.synchronous(); + } else { + return configurationBuilder.asynchronous(); + } + } + + // Package private for testing + Map<K, CompletableFuture<Tuple<K, V>>> getCompletableFutureMap() { + return completableFutureMap; + } + + @SuppressForbidden(reason = "Ehcache uses File.io") + private PersistentCacheManager buildCacheManager() { + // In case we use multiple ehCaches, we can define this cache manager at a global level. + return CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true); + } + + @Override + public V get(K key) { + if (key == null) { + throw new IllegalArgumentException("Key passed to ehcache disk cache was null."); + } + V value; + try { + value = deserializeValue(cache.get(key)); + } catch (CacheLoadingException ex) { + throw new OpenSearchException("Exception occurred while trying to fetch item from ehcache disk cache"); + } + return value; + } + + /** + * Puts the item into cache. + * @param key the key. + * @param value the value. + */ + @Override + public void put(K key, V value) { + try { + cache.put(key, serializeValue(value)); + } catch (CacheWritingException ex) { + throw new OpenSearchException("Exception occurred while putting item into ehcache disk cache"); + } + } + + /** + * Computes the value using the loader in case the key is not present; otherwise fetches it. + * @param key the key + * @param loader loader to load the value in case key is missing + * @return value + * @throws Exception when either internal get or put calls fail. + */ + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // Ehcache doesn't provide any computeIfAbsent function. It exposes putIfAbsent, but that works differently and is + // not performant when there are multiple concurrent requests for the same key. Below is our own custom + // implementation of computeIfAbsent on top of ehcache.
Inspired by the OpenSearch Cache implementation. + V value = deserializeValue(cache.get(key)); + if (value == null) { + value = compute(key, loader); + } + return value; + } + + private V compute(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // A future that returns a pair of key/value. + CompletableFuture<Tuple<K, V>> completableFuture = new CompletableFuture<>(); + // Only one of the threads will succeed in putting a future into the map for the same key. + // The rest will fetch the existing future. + CompletableFuture<Tuple<K, V>> future = completableFutureMap.putIfAbsent(key, completableFuture); + // Handler for post-processing the result. Takes a tuple<key, value> or an exception as input and returns + // the value. Before returning the value, it also puts the value in the cache. + BiFunction<Tuple<K, V>, Throwable, V> handler = (pair, ex) -> { + V value = null; + if (pair != null) { + cache.put(pair.v1(), serializeValue(pair.v2())); + value = pair.v2(); // Return the value itself, assuming a subsequent get would return the same. This is + // safe to assume if we got no exception and reached here. + } + completableFutureMap.remove(key); // Remove key from map as not needed anymore. + return value; + }; + CompletableFuture<V> completableValue; + if (future == null) { + future = completableFuture; + completableValue = future.handle(handler); + V value; + try { + value = loader.load(key); + } catch (Exception ex) { + future.completeExceptionally(ex); + throw new ExecutionException(ex); + } + if (value == null) { + NullPointerException npe = new NullPointerException("loader returned a null value"); + future.completeExceptionally(npe); + throw new ExecutionException(npe); + } else { + future.complete(new Tuple<>(key, value)); + } + + } else { + completableValue = future.handle(handler); + } + V value; + try { + value = completableValue.get(); + if (future.isCompletedExceptionally()) { + future.get(); // call get to force the exception to be thrown for other concurrent callers + throw new IllegalStateException("Future completed exceptionally but no error thrown"); + } + } catch (InterruptedException ex) { + throw new IllegalStateException(ex); + } + return value; + } + + /** + * Invalidate the item. + * @param key key to be invalidated. + */ + @Override + public void invalidate(K key) { + try { + cache.remove(key); + } catch (CacheWritingException ex) { + // Propagate the failed disk write as an unchecked exception. + throw new RuntimeException(ex); + } + + } + + @Override + public void invalidateAll() { + cache.clear(); + this.entries.dec(this.entries.count()); // reset to zero. + } + + /** + * Provides a way to iterate over disk cache keys. + * @return Iterable + */ + @Override + public Iterable<K> keys() { + return () -> new EhCacheKeyIterator<>(cache.iterator()); + } + + /** + * Gives the current count of keys in disk cache. + * @return current count of keys + */ + @Override + public long count() { + return entries.count(); + } + + @Override + public void refresh() { + // TODO: ehcache doesn't provide a way to refresh a cache.
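+ // Intentionally a no-op until such an API exists: the disk tier neither reloads nor revalidates entries on refresh.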
+ } + + @Override + @SuppressForbidden(reason = "Ehcache uses File.io") + public void close() { + cacheManager.removeCache(this.diskCacheAlias); + cacheManager.close(); + try { + cacheManager.destroyCache(this.diskCacheAlias); + // Delete all the disk cache related files/data + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + IOUtils.rm(ehcacheDirectory); + } + } catch (CachePersistenceException e) { + throw new OpenSearchException("Exception occurred while destroying ehcache and associated data", e); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); + } + } + + /** + * This iterator wraps ehCache iterator and only iterates over its keys. + * @param <K> Type of key + */ + class EhCacheKeyIterator<K> implements Iterator<K> { + + Iterator<Cache.Entry<K, ByteArrayWrapper>> iterator; + + EhCacheKeyIterator(Iterator<Cache.Entry<K, ByteArrayWrapper>> iterator) { + this.iterator = iterator; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public K next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return iterator.next().getKey(); + } + + @Override + public void remove() { + iterator.remove(); // Calls underlying ehcache iterator.remove() + } + } + + /** + * Wrapper over Ehcache original listener to listen to desired events and notify desired subscribers. + */ + class EhCacheEventListener implements CacheEventListener<K, ByteArrayWrapper> { + + private final RemovalListener<K, V> removalListener; + + EhCacheEventListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + } + + @Override + public void onEvent(CacheEvent<? extends K, ? extends ByteArrayWrapper> event) { + switch (event.getType()) { + case CREATED: + entries.inc(); + assert event.getOldValue() == null; + break; + case EVICTED: + this.removalListener.onRemoval( + new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.EVICTED) + ); + entries.dec(); + assert event.getNewValue() == null; + break; + case REMOVED: + entries.dec(); + this.removalListener.onRemoval( + new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.EXPLICIT) + ); + assert event.getNewValue() == null; + break; + case EXPIRED: + this.removalListener.onRemoval( + new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.INVALIDATED) + ); + entries.dec(); + assert event.getNewValue() == null; + break; + case UPDATED: + break; + default: + break; + } + } + } + + /** + * Wrapper over Serializer which is compatible with ehcache's serializer requirements. + */ + private class KeySerializerWrapper<T> implements org.ehcache.spi.serialization.Serializer<T> { + private Serializer<T, byte[]> serializer; + + public KeySerializerWrapper(Serializer<T, byte[]> keySerializer) { + this.serializer = keySerializer; + } + + // This constructor must be present, but does not have to work as we are not actually persisting the disk + // cache after a restart. 
+ // See https://www.ehcache.org/documentation/3.0/serializers-copiers.html#persistent-vs-transient-caches + public KeySerializerWrapper(ClassLoader classLoader, FileBasedPersistenceContext persistenceContext) {} + + @Override + public ByteBuffer serialize(T object) throws SerializerException { + return ByteBuffer.wrap(serializer.serialize(object)); + } + + @Override + public T read(ByteBuffer binary) throws ClassNotFoundException, SerializerException { + byte[] arr = new byte[binary.remaining()]; + binary.get(arr); + return serializer.deserialize(arr); + } + + @Override + public boolean equals(T object, ByteBuffer binary) throws ClassNotFoundException, SerializerException { + byte[] arr = new byte[binary.remaining()]; + binary.get(arr); + return serializer.equals(object, arr); + } + } + + /** + * Wrapper allowing Ehcache to serialize ByteArrayWrapper. + */ + private static class ByteArrayWrapperSerializer implements org.ehcache.spi.serialization.Serializer<ByteArrayWrapper> { + public ByteArrayWrapperSerializer() {} + + // This constructor must be present, but does not have to work as we are not actually persisting the disk + // cache after a restart. + // See https://www.ehcache.org/documentation/3.0/serializers-copiers.html#persistent-vs-transient-caches + public ByteArrayWrapperSerializer(ClassLoader classLoader, FileBasedPersistenceContext persistenceContext) {} + + @Override + public ByteBuffer serialize(ByteArrayWrapper object) throws SerializerException { + return ByteBuffer.wrap(object.value); + } + + @Override + public ByteArrayWrapper read(ByteBuffer binary) throws ClassNotFoundException, SerializerException { + byte[] arr = new byte[binary.remaining()]; + binary.get(arr); + return new ByteArrayWrapper(arr); + } + + @Override + public boolean equals(ByteArrayWrapper object, ByteBuffer binary) throws ClassNotFoundException, SerializerException { + byte[] arr = new byte[binary.remaining()]; + binary.get(arr); + return Arrays.equals(arr, object.value); + } + } + + /** + * Transform a value from V to ByteArrayWrapper, which can be passed to ehcache. + * @param value the value + * @return the serialized value + */ + private ByteArrayWrapper serializeValue(V value) { + ByteArrayWrapper result = new ByteArrayWrapper(valueSerializer.serialize(value)); + return result; + } + + /** + * Transform a ByteArrayWrapper, which comes from ehcache, back to V. + * @param binary the serialized value + * @return the deserialized value + */ + private V deserializeValue(ByteArrayWrapper binary) { + if (binary == null) { + return null; + } + return valueSerializer.deserialize(binary.value); + } + + /** + * Factory to create an ehcache disk cache. + */ + public static class EhcacheDiskCacheFactory implements ICache.Factory { + + /** + * Ehcache disk cache name. + */ + public static final String EHCACHE_DISK_CACHE_NAME = "ehcache_disk"; + + /** + * Default constructor. 
+ */ + public EhcacheDiskCacheFactory() {} + + @Override + @SuppressWarnings({ "unchecked" }) // Required for the casts to Serializer<K, byte[]> and Serializer<V, byte[]> below + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + + Serializer<K, byte[]> keySerializer = null; + try { + keySerializer = (Serializer<K, byte[]>) config.getKeySerializer(); + } catch (ClassCastException e) { + throw new IllegalArgumentException("EhcacheDiskCache requires a key serializer of type Serializer<K, byte[]>"); + } + + Serializer<V, byte[]> valueSerializer = null; + try { + valueSerializer = (Serializer<V, byte[]>) config.getValueSerializer(); + } catch (ClassCastException e) { + throw new IllegalArgumentException("EhcacheDiskCache requires a value serializer of type Serializer<V, byte[]>"); + } + + return new Builder<K, V>().setStoragePath((String) settingList.get(DISK_STORAGE_PATH_KEY).get(settings)) + .setDiskCacheAlias((String) settingList.get(DISK_CACHE_ALIAS_KEY).get(settings)) + .setIsEventListenerModeSync((Boolean) settingList.get(DISK_LISTENER_MODE_SYNC_KEY).get(settings)) + .setCacheType(cacheType) + .setKeyType(config.getKeyType()) + .setValueType(config.getValueType()) + .setKeySerializer(keySerializer) + .setValueSerializer(valueSerializer) + .setRemovalListener(config.getRemovalListener()) + .setExpireAfterAccess((TimeValue) settingList.get(DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY).get(settings)) + .setMaximumWeightInBytes((Long) settingList.get(DISK_MAX_SIZE_IN_BYTES_KEY).get(settings)) + .setSettings(settings) + .build(); + } + + @Override + public String getCacheName() { + return EHCACHE_DISK_CACHE_NAME; + } + } + + /** + * Builder object to build Ehcache disk tier. + * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + private CacheType cacheType; + private String storagePath; + + private String threadPoolAlias; + + private String diskCacheAlias; + + // Provides the capability to run the Ehcache event listener in sync mode. Also used for testing. + private boolean isEventListenerModeSync; + + private Class<K> keyType; + + private Class<V> valueType; + private Serializer<K, byte[]> keySerializer; + private Serializer<V, byte[]> valueSerializer; + + /** + * Default constructor. Added to fix javadocs. + */ + public Builder() {} + + /** + * Sets the desired cache type. + * @param cacheType cache type + * @return builder + */ + public Builder<K, V> setCacheType(CacheType cacheType) { + this.cacheType = cacheType; + return this; + } + + /** + * Sets the class type of the key. + * @param keyType type of key + * @return builder + */ + public Builder<K, V> setKeyType(Class<K> keyType) { + this.keyType = keyType; + return this; + } + + /** + * Sets the class type of the value. + * @param valueType type of value + * @return builder + */ + public Builder<K, V> setValueType(Class<V> valueType) { + this.valueType = valueType; + return this; + } + + /** + * Desired storage path for disk cache. + * @param storagePath path for disk cache + * @return builder + */ + public Builder<K, V> setStoragePath(String storagePath) { + this.storagePath = storagePath; + return this; + } + + /** + * Thread pool alias for the cache.
+ * @param threadPoolAlias alias + * @return builder + */ + public Builder<K, V> setThreadPoolAlias(String threadPoolAlias) { + this.threadPoolAlias = threadPoolAlias; + return this; + } + + /** + * Cache alias + * @param diskCacheAlias disk cache alias + * @return builder + */ + public Builder<K, V> setDiskCacheAlias(String diskCacheAlias) { + this.diskCacheAlias = diskCacheAlias; + return this; + } + + /** + * Determines whether event listener is triggered async/sync. + * @param isEventListenerModeSync mode sync + * @return builder + */ + public Builder<K, V> setIsEventListenerModeSync(boolean isEventListenerModeSync) { + this.isEventListenerModeSync = isEventListenerModeSync; + return this; + } + + /** + * Sets the key serializer for this cache. + * @param keySerializer the key serializer + * @return builder + */ + public Builder<K, V> setKeySerializer(Serializer<K, byte[]> keySerializer) { + this.keySerializer = keySerializer; + return this; + } + + /** + * Sets the value serializer for this cache. + * @param valueSerializer the value serializer + * @return builder + */ + public Builder<K, V> setValueSerializer(Serializer<V, byte[]> valueSerializer) { + this.valueSerializer = valueSerializer; + return this; + } + + @Override + public EhcacheDiskCache<K, V> build() { + return new EhcacheDiskCache<>(this); + } + } + + /** + * A wrapper over byte[], with equals() that works using Arrays.equals(). + * Necessary due to a bug in Ehcache. + */ + static class ByteArrayWrapper { + private final byte[] value; + + public ByteArrayWrapper(byte[] value) { + this.value = value; + } + + @Override + public boolean equals(Object o) { + if (o == null || o.getClass() != ByteArrayWrapper.class) { + return false; + } + ByteArrayWrapper other = (ByteArrayWrapper) o; + return Arrays.equals(this.value, other.value); + } + + @Override + public int hashCode() { + return Arrays.hashCode(value); + } + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java new file mode 100644 index 0000000000000..79f8eec2f3f4c --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** + * Base package for disk cache related stuff. + */ +package org.opensearch.cache.store.disk; diff --git a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..40007eea62dba --- /dev/null +++ b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant { + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + permission java.lang.RuntimePermission "createClassLoader"; +}; + diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java new file mode 100644 index 0000000000000..538a45456ddc3 --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class EhcachePluginTests extends OpenSearchTestCase { + + private EhcacheCachePlugin ehcacheCachePlugin = new EhcacheCachePlugin(); + + public void testGetCacheStoreTypeMap() { + Map<String, ICache.Factory> factoryMap = ehcacheCachePlugin.getCacheFactoryMap(); + assertNotNull(factoryMap); + assertNotNull(factoryMap.get(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME)); + } +} diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java new file mode 100644 index 0000000000000..8c0ab62baee54 --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -0,0 +1,678 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.store.disk; + +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.Randomness; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.serializer.BytesReferenceSerializer; +import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Phaser; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.hamcrest.CoreMatchers.instanceOf; + +public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { + + private static final int CACHE_SIZE_IN_BYTES = 1024 * 101; + + public void testBasicGetAndPut() throws IOException { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + 
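+ // Each get for a random absent key is a miss and should not add an entry to the cache.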
} + + ehcacheTest.close(); + } + } + + public void testBasicGetAndPutUsingFactory() throws IOException { + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) { + ICache.Factory ehcacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory(); + ICache<String, String> ehcacheTest = ehcacheFactory.create( + new CacheConfig.Builder<String, String>().setValueType(String.class) + .setKeyType(String.class) + .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setSettings( + Settings.builder() + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_MAX_SIZE_IN_BYTES_KEY) + .getKey(), + CACHE_SIZE_IN_BYTES + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_STORAGE_PATH_KEY) + .getKey(), + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_LISTENER_MODE_SYNC_KEY) + .getKey(), + true + ) + .build() + ) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of() + ); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + } + + ehcacheTest.close(); + } + } + + public void testConcurrentPut() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + ehcacheTest.put(entry.getKey(), entry.getValue()); + 
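+ // Count down after the put so the main thread verifies results only once every writer has finished.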
countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel puts above. + countDownLatch.await(); // Wait for all threads to finish + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testEhcacheParallelGets() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + assertEquals(keyValueMap.size(), ehcacheTest.count()); + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + assertEquals(entry.getValue(), ehcacheTest.get(entry.getKey())); + countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel gets above.
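+ // The extra party registered with the phaser is the main test thread; arriving here releases every reader thread at once.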
+ countDownLatch.await(); // Wait for all threads to finish + ehcacheTest.close(); + } + } + + public void testEhcacheKeyIterator() throws Exception { + Settings settings = Settings.builder().build(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(new MockRemovalListener<>()) + .build(); + + int randomKeys = randomIntBetween(2, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + Iterator<String> keys = ehcacheTest.keys().iterator(); + int keysCount = 0; + while (keys.hasNext()) { + String key = keys.next(); + keysCount++; + assertNotNull(ehcacheTest.get(key)); + } + assertEquals(randomKeys, keysCount); + ehcacheTest.close(); + } + } + + public void testEvictions() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + // Generate a string with 100 characters + String value = generateRandomString(100); + + // Trying to generate more than 100 KB to cause evictions.
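+ // 1000 entries, each a ~100 byte value plus its key and Ehcache's per-entry overhead, push the total past the 1024 * 101 byte limit and force evictions.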
+ for (int i = 0; i < 1000; i++) { + String key = "Key" + i; + ehcacheTest.put(key, value); + } + assertEquals(660, removalListener.evictionMetric.count()); // Expected to be deterministic for this fixed workload with the sync listener mode + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrently() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + String value = "dummy"; + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Hit the same key with different requests concurrently. Verify the value is only loaded once. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) { + isLoaded = true; + return value; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + assertEquals(value, ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + int numberOfTimesValueLoaded = 0; + for (int i = 0; i < numberOfRequest; i++) { + if (loadAwareCacheLoaderList.get(i).isLoaded()) { + numberOfTimesValueLoaded++; + } + } + assertEquals(1, numberOfTimesValueLoaded); + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + assertEquals(1, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrentlyAndThrowsException() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) +
.setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Hit the same key with different requests concurrently. Loader throws exception. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + throw new RuntimeException("Exception"); + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentWithNullValueLoading() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Hit the same key with different requests concurrently. Loader returns null.
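+ // A null value returned by the loader must not be cached; computeIfAbsent is expected to surface a NullPointerException cause and clean up its in-flight future.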
+ for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + return null; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader); + } catch (Exception ex) { + assertThat(ex.getCause(), instanceOf(NullPointerException.class)); + } + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + public void testEhcacheKeyIteratorWithRemove() throws IOException { + Settings settings = Settings.builder().build(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(new MockRemovalListener<>()) + .build(); + + int randomKeys = randomIntBetween(2, 100); + for (int i = 0; i < randomKeys; i++) { + ehcacheTest.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + long originalSize = ehcacheTest.count(); + assertEquals(randomKeys, originalSize); + + // Now try removing subset of keys and verify + List<String> removedKeyList = new ArrayList<>(); + for (Iterator<String> iterator = ehcacheTest.keys().iterator(); iterator.hasNext();) { + String key = iterator.next(); + if (randomBoolean()) { + removedKeyList.add(key); + iterator.remove(); + } + } + // Verify the removed key doesn't exist anymore. + for (String ehcacheKey : removedKeyList) { + assertNull(ehcacheTest.get(ehcacheKey)); + } + // Verify ehcache entry size again. 
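+ // iterator.remove() delegates to the underlying Ehcache iterator, so the removals are reflected in count().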
+ assertEquals(originalSize - removedKeyList.size(), ehcacheTest.count()); + ehcacheTest.close(); + } + + } + + public void testInvalidateAll() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + ehcacheTest.invalidateAll(); // clear all the entries. + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + // Verify that value is null for a removed entry. + assertNull(ehcacheTest.get(entry.getKey())); + } + assertEquals(0, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testBasicGetAndPutBytesReference() throws Exception { + Settings settings = Settings.builder().build(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, BytesReference> ehCacheDiskCachingTier = new EhcacheDiskCache.Builder<String, BytesReference>() + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new BytesReferenceSerializer()) + .setKeyType(String.class) + .setValueType(BytesReference.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES * 20) // bigger so no evictions happen + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setRemovalListener(new MockRemovalListener<>()) + .build(); + int randomKeys = randomIntBetween(10, 100); + int valueLength = 100; + Random rand = Randomness.get(); + Map<String, BytesReference> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + byte[] valueBytes = new byte[valueLength]; + rand.nextBytes(valueBytes); + keyValueMap.put(UUID.randomUUID().toString(), new BytesArray(valueBytes)); + + // Test a non-BytesArray implementation of BytesReference. 
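+ // CompositeBytesReference.of composes the two BytesArray segments without copying, exercising the serializer on a non-contiguous BytesReference.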
+ byte[] compositeBytes1 = new byte[valueLength]; + byte[] compositeBytes2 = new byte[valueLength]; + rand.nextBytes(compositeBytes1); + rand.nextBytes(compositeBytes2); + BytesReference composite = CompositeBytesReference.of(new BytesArray(compositeBytes1), new BytesArray(compositeBytes2)); + keyValueMap.put(UUID.randomUUID().toString(), composite); + } + for (Map.Entry<String, BytesReference> entry : keyValueMap.entrySet()) { + ehCacheDiskCachingTier.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, BytesReference> entry : keyValueMap.entrySet()) { + BytesReference value = ehCacheDiskCachingTier.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + ehCacheDiskCachingTier.close(); + } + } + + private static String generateRandomString(int length) { + String characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + StringBuilder randomString = new StringBuilder(length); + + for (int i = 0; i < length; i++) { + int index = (int) (randomDouble() * characters.length()); + randomString.append(characters.charAt(index)); + } + + return randomString.toString(); + } + + static class MockRemovalListener<K, V> implements RemovalListener<K, V> { + + CounterMetric evictionMetric = new CounterMetric(); + + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + evictionMetric.inc(); + } + } + + static class StringSerializer implements Serializer<String, byte[]> { + private final Charset charset = StandardCharsets.UTF_8; + + @Override + public byte[] serialize(String object) { + return object.getBytes(charset); + } + + @Override + public String deserialize(byte[] bytes) { + if (bytes == null) { + return null; + } + return new String(bytes, charset); + } + + public boolean equals(String object, byte[] bytes) { + return object.equals(deserialize(bytes)); + } + } +} diff --git a/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 deleted file mode 100644 index 5a626eeb5725b..0000000000000 --- a/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 deleted file mode 100644 index 3ee96bb6e4076..0000000000000 --- a/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 deleted file mode 100644 index 010464bdf9fd1..0000000000000 --- a/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 deleted file mode 100644 index 4b4ee1db864a8..0000000000000 --- a/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 deleted file mode 100644 index 45a88305c1928..0000000000000 --- a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 deleted file mode 100644 index ba5f43378730c..0000000000000 --- a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 deleted file mode 100644 index 5bc0e31166c77..0000000000000 --- a/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 deleted file mode 100644 index 523cf43dcb2e9..0000000000000 --- a/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 deleted file mode 100644 index a19b00e62f8b5..0000000000000 --- a/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 deleted file mode 100644 index 0b4e98f59a066..0000000000000 --- a/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -389780132dd417ab58e0bb9b269d738ff839605f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..32c4e9f432898 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 @@ -0,0 +1 @@ +6a81c2f14acaa7b9dcdc80c715d6e44d815a818a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 deleted file mode 100644 index db6701d87892a..0000000000000 --- a/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a0eae705b27465516f3b09cc9918e40963d534d \ No 
newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 deleted file mode 100644 index b7104cf0939e6..0000000000000 --- a/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 deleted file mode 100644 index 4dee45f4d9dd3..0000000000000 --- a/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 deleted file mode 100644 index 993fc2f97de62..0000000000000 --- a/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 deleted file mode 100644 index 5f12be9c08c5b..0000000000000 --- a/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 deleted file mode 100644 index e7eebbb98f1fe..0000000000000 --- a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git 
a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 deleted file mode 100644 index fc4cde604e33c..0000000000000 --- a/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java index 5b026c30017ca..f3d0f278c7ce7 100644 --- a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java @@ -19,6 +19,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index ad622e68f5ccb..a4b733ec7d894 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -296,6 +296,7 @@ private static SSLContext getSSLContext() throws Exception { * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK */ + @SuppressWarnings("removal") private static String getProtocol() { if (Runtime.version().compareTo(Version.parse("12")) < 0) { return "TLSv1.2"; diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 1bac80e576199..6e21feca7f5fb 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -112,6 +112,7 @@ private static String getRequiredSetting(Settings settings, Setting<String> sett return value; } + @SuppressWarnings("removal") @Override public HostedServiceGetDetailedResponse getServiceDetails() { SpecialPermission.check(); diff --git 
a/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 deleted file mode 100644 index 5a626eeb5725b..0000000000000 --- a/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 deleted file mode 100644 index 3ee96bb6e4076..0000000000000 --- a/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 deleted file mode 100644 index 010464bdf9fd1..0000000000000 --- a/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 deleted file mode 100644 index 4b4ee1db864a8..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 deleted file mode 100644 index 45a88305c1928..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff 
--git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 deleted file mode 100644 index ba5f43378730c..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 deleted file mode 100644 index f123343bfe27e..0000000000000 --- a/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c095e527442835130b18387da6b1d01f365a6dbf \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..18c43cfc7516d --- /dev/null +++ b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 @@ -0,0 +1 @@ +3522a0829622a9c80152e6e2528bb79166f0b709 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 deleted file mode 100644 index 5bc0e31166c77..0000000000000 --- a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 deleted file mode 100644 index 523cf43dcb2e9..0000000000000 --- a/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 
@@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 deleted file mode 100644 index a19b00e62f8b5..0000000000000 --- a/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 deleted file mode 100644 index db6701d87892a..0000000000000 --- a/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a0eae705b27465516f3b09cc9918e40963d534d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 deleted file mode 100644 index b7104cf0939e6..0000000000000 --- a/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 deleted file mode 100644 index 4dee45f4d9dd3..0000000000000 --- a/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ 
b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 deleted file mode 100644 index 993fc2f97de62..0000000000000 --- a/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 deleted file mode 100644 index 5f12be9c08c5b..0000000000000 --- a/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 deleted file mode 100644 index e7eebbb98f1fe..0000000000000 --- a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 deleted file mode 100644 index fc4cde604e33c..0000000000000 --- a/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java index c6605002c4462..0125ae4d19c3e 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. 
This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java index b4ed613c0d8dd..9518fac442111 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java @@ -89,6 +89,7 @@ public static void startHttp() throws Exception { httpServer.start(); } + @SuppressWarnings("removal") @Before public void setup() { // redirect EC2 metadata service to httpServer @@ -116,6 +117,7 @@ public void testNetworkHostEc2() throws IOException { /** * Test for network.host: _ec2_ */ + @SuppressWarnings("removal") public void testNetworkHostUnableToResolveEc2() { // redirect EC2 metadata service to unknown location AccessController.doPrivileged( diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index c8b52d3afcd45..85efcc43fd65a 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.34.1" + api "com.google.oauth-client:google-oauth-client:1.35.0" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 deleted file mode 100644 index a8434bd380761..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..a52e79088c7ca --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 @@ -0,0 +1 @@ +2f52003156e40ba8be5f349a2716a77428896e69 \ No newline at end of file diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java index 1401f7ca26ce6..c46bfedbd8507 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java @@ -48,6 +48,7 @@ * {@code connect}. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
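+ * Note: the {@code removal} suppression added below reflects that the Security Manager APIs used here ({@link AccessController}, {@link PrivilegedAction}) are deprecated for removal since JDK 17 (JEP 411); the annotation silences that warning at the use site rather than migrating off the APIs.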
*/ +@SuppressWarnings("removal") public final class Access { private Access() {} diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index baa3464d0a98e..222443efcb214 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -17,7 +17,7 @@ opensearchplugin { } dependencies { - implementation 'org.apache.shiro:shiro-core:1.11.0' + implementation 'org.apache.shiro:shiro-core:1.13.0' // Needed for shiro implementation "org.slf4j:slf4j-api:${versions.slf4j}" @@ -28,7 +28,7 @@ dependencies { implementation 'org.passay:passay:1.6.3' - implementation "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" + implementation "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" testImplementation project(path: ':modules:transport-netty4') // for http testImplementation project(path: ':plugins:transport-nio') // for http diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 2e96c404bef98..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 deleted file mode 100644 index 67c33e15ec689..0000000000000 --- a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -033a70c87e91968a299f1ee00f4e95050312346d \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 new file mode 100644 index 0000000000000..25bd4d9acd166 --- /dev/null +++ b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 @@ -0,0 +1 @@ +7e542e3d614b197bf10005e98e19f9f19cb943e7 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt +++ b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 22db73ad86796..6da34c4c9caf2 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -71,9 +71,9 @@ dependencies { api "org.apache.pdfbox:fontbox:${versions.pdfbox}" api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.bouncycastle:bcmail-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcmail-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 46010d64015ad..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23d8bcad6b57912e4633ca9955926ffcdf3c5c71 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..f71659316b8cd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +f2bb8aa55dc901ee8b8aae7d1007c03592d65e03 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index a843d972ac681..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ee440dfa1c557c1cc0c46b5dadf5ef3896ccebb \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..05a8b2d5729bd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +ed953791ba0229747dd0fd9911e3d76a462acfd3 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt similarity index 100% rename from 
plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 deleted file mode 100644 index 2e96c404bef98..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index ce7ceb5e3d776..fe783e5ddb675 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -112,6 +112,7 @@ final class TikaImpl { /** * parses with tika, throwing any exception hit while parsing the document */ + @SuppressWarnings("removal") static String parse(final byte content[], final Metadata metadata, final int limit) throws TikaException, IOException { // check that its not unprivileged code like a script SpecialPermission.check(); @@ -136,6 +137,7 @@ static String parse(final byte content[], final Metadata metadata, final int lim // apply additional containment for parsers, this is intersected with the current permissions // its hairy, but worth it so we don't have some XML flaw reading random crap from the FS + @SuppressWarnings("removal") private static final AccessControlContext RESTRICTED_CONTEXT = new AccessControlContext( new ProtectionDomain[] { new ProtectionDomain(null, getRestrictedPermissions()) } ); diff --git a/plugins/query-insights/build.gradle b/plugins/query-insights/build.gradle new file mode 100644 index 0000000000000..eabbd395bd3bd --- /dev/null +++ b/plugins/query-insights/build.gradle @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +opensearchplugin { + description 'OpenSearch Query Insights Plugin.' + classname 'org.opensearch.plugin.insights.QueryInsightsPlugin' +} + +dependencies { +} diff --git a/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java new file mode 100644 index 0000000000000..04e715444f50a --- /dev/null +++ b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java @@ -0,0 +1,274 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Transport Action tests for Query Insights Plugin + */ + +@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) +public class QueryInsightsPluginTransportIT extends OpenSearchIntegTestCase { + + private final int TOTAL_NUMBER_OF_NODES = 2; + private final int TOTAL_SEARCH_REQUESTS = 5; + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Arrays.asList(QueryInsightsPlugin.class); + } + + /** + * Test Query Insights Plugin is installed + */ + public void testQueryInsightPluginInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.insights.QueryInsightsPlugin")) + ); + } + + /** + * Test get top queries when feature disabled + */ + public void testGetTopQueriesWhenFeatureDisabled() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + } + + /** + * Test updating top query records when the feature is disabled, then enabled + */ + public void testUpdateRecordWhenFeatureDisabledThenEnabled() throws ExecutionException, InterruptedException { + Settings commonSettings = Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "false").build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true").build() + ); +
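// applying this setting update triggers the listener's settings-update consumer, which re-enables collection and restarts the query insights service +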
assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + TopQueriesRequest request2 = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response2 = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request2).actionGet(); + Assert.assertEquals(0, response2.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response2.getNodes().size()); + for (int i = 0; i < TOTAL_NUMBER_OF_NODES; i++) { + Assert.assertEquals(0, response2.getNodes().get(i).getTopQueriesRecord().size()); + } + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries when feature enabled + */ + public void testGetTopQueriesWhenFeatureEnabled() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + // Sleep to wait for queue drained to top queries store + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(TOTAL_SEARCH_REQUESTS, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small top n size + */ + public void testGetTopQueriesWithSmallTopN() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "1") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + 
prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(2, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small window size + */ + public void testGetTopQueriesWithSmallWindowSize() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "1m") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Thread.sleep(6000); + internalCluster().stopAllNodes(); + } +} diff --git a/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java new file mode 100644 index 0000000000000..57dea6ad8d5ff --- /dev/null +++ b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java @@ -0,0 +1,107 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +/** + * Rest Action tests for Query Insights + */ +public class TopQueriesRestIT extends OpenSearchRestTestCase { + + /** + * test Query Insights is installed + * @throws IOException IOException + */ + @SuppressWarnings("unchecked") + public void testQueryInsightsPluginInstalled() throws IOException { + Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); + Response response = client().performRequest(request); + List<Object> pluginsList = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + response.getEntity().getContent() + ).list(); + Assert.assertTrue( + pluginsList.stream().map(o -> (Map<String, Object>) o).anyMatch(plugin -> plugin.get("component").equals("query-insights")) + ); + } + + /** + * test enabling top queries + * @throws IOException IOException + */ + public void testTopQueriesResponses() throws IOException { + // Enable Top N Queries feature + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(defaultTopQueriesSettings()); + Response response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Create documents for search + request = new Request("POST", "/my-index-0/_doc"); + request.setJsonEntity(createDocumentsBody()); + response = client().performRequest(request); + + Assert.assertEquals(201, response.getStatusLine().getStatusCode()); + + // Do Search + request = new Request("GET", "/my-index-0/_search?size=20&pretty"); + request.setJsonEntity(searchBody()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Get Top Queries + request = new Request("GET", "/_insights/top_queries?pretty"); + response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + String top_requests = new String(response.getEntity().getContent().readAllBytes(), StandardCharsets.UTF_8); + Assert.assertTrue(top_requests.contains("top_queries")); + Assert.assertEquals(2, top_requests.split("searchType", -1).length - 1); + } + + private String defaultTopQueriesSettings() { + return "{\n" + + " \"persistent\" : {\n" + + " \"search.top_n_queries.latency.enabled\" : \"true\",\n" + + " \"search.top_n_queries.latency.window_size\" : \"600s\",\n" + + " \"search.top_n_queries.latency.top_n_size\" : 5\n" + + " }\n" + + "}"; + } + + private String createDocumentsBody() { + return "{\n" + + " \"@timestamp\": \"2099-11-15T13:12:00\",\n" + + " \"message\": \"this is document 1\",\n" + + " \"user\": {\n" + + " \"id\": \"cyji\"\n" + + " }\n" + + "}"; 
+ } + + private String searchBody() { + return "{}"; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java new file mode 100644 index 0000000000000..4d7e0d486068a --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.rules.transport.top_queries.TransportTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; +import org.opensearch.script.ScriptService; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +/** + * Plugin class for Query Insights. 
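+ * It wires the QueryInsightsService and its request listener into the node, registers the top queries transport action and REST handler, declares a scaling executor for async processing, and exposes the top N latency settings.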
+ */ +public class QueryInsightsPlugin extends Plugin implements ActionPlugin { + /** + * Default constructor + */ + public QueryInsightsPlugin() {} + + @Override + public Collection<Object> createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<RepositoriesService> repositoriesServiceSupplier + ) { + // create top n queries service + final QueryInsightsService queryInsightsService = new QueryInsightsService(threadPool); + return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService)); + } + + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(final Settings settings) { + return List.of( + new ScalingExecutorBuilder( + QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR, + 1, + Math.min((OpenSearchExecutors.allocatedProcessors(settings) + 1) / 2, QueryInsightsSettings.MAX_THREAD_COUNT), + TimeValue.timeValueMinutes(5) + ) + ); + } + + @Override + public List<RestHandler> getRestHandlers( + final Settings settings, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<DiscoveryNodes> nodesInCluster + ) { + return List.of(new RestTopQueriesAction()); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionPlugin.ActionHandler<>(TopQueriesAction.INSTANCE, TransportTopQueriesAction.class)); + } + + @Override + public List<Setting<?>> getSettings() { + return List.of( + // Settings for top N queries + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java new file mode 100644 index 0000000000000..9ec8673147c38 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.listener; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; + +/** + * The listener for query insights services. + * It forwards query-related data to the appropriate query insights stores, + * either for each request or for each phase. + * + * @opensearch.internal + */ +public final class QueryInsightsListener extends SearchRequestOperationsListener { + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + private static final Logger log = LogManager.getLogger(QueryInsightsListener.class); + + private final QueryInsightsService queryInsightsService; + + /** + * Constructor for QueryInsightsListener + * + * @param clusterService The Node's cluster service. 
+ * @param queryInsightsService the {@link QueryInsightsService} associated with this listener + */ + @Inject + public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) { + this.queryInsightsService = queryInsightsService; + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(TOP_N_LATENCY_QUERIES_ENABLED, v -> this.setEnableTopQueries(MetricType.LATENCY, v)); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + TOP_N_LATENCY_QUERIES_SIZE, + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setTopNSize(v), + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateTopNSize(v) + ); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + TOP_N_LATENCY_QUERIES_WINDOW_SIZE, + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setWindowSize(v), + v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateWindowSize(v) + ); + this.setEnableTopQueries(MetricType.LATENCY, clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_ENABLED)); + this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) + .setTopNSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_SIZE)); + this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) + .setWindowSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_WINDOW_SIZE)); + } + + /** + * Enable or disable top queries insights collection for {@link MetricType}. + * This function will enable or disable the corresponding listeners + * and query insights services. + * + * @param metricType {@link MetricType} + * @param enabled boolean + */ + public void setEnableTopQueries(final MetricType metricType, final boolean enabled) { + boolean isAllMetricsDisabled = !queryInsightsService.isEnabled(); + this.queryInsightsService.enableCollection(metricType, enabled); + if (!enabled) { + // disable QueryInsightsListener only if all metrics collections are disabled now. + if (!queryInsightsService.isEnabled()) { + super.setEnabled(false); + this.queryInsightsService.stop(); + } + } else { + super.setEnabled(true); + // restart the query insights service only if no metric collection was enabled before.
+ if (isAllMetricsDisabled) { + this.queryInsightsService.stop(); + this.queryInsightsService.start(); + } + } + + } + + @Override + public boolean isEnabled() { + return super.isEnabled(); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) { + final SearchRequest request = context.getRequest(); + try { + Map<MetricType, Number> measurements = new HashMap<>(); + if (queryInsightsService.isCollectionEnabled(MetricType.LATENCY)) { + measurements.put( + MetricType.LATENCY, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos()) + ); + } + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT)); + attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS)); + attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards()); + attributes.put(Attribute.INDICES, request.indices()); + attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap()); + SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes); + queryInsightsService.addRecord(record); + } catch (Exception e) { + log.error(String.format(Locale.ROOT, "failed to ingest query insights data: %s", e)); + } + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java new file mode 100644 index 0000000000000..3cb9cacf7fd1c --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Listeners for Query Insights + */ +package org.opensearch.plugin.insights.core.listener; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java new file mode 100644 index 0000000000000..525ca0d4a3d33 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; + +/** + * Service responsible for gathering, analyzing, storing and exporting + * information related to search queries + * + * @opensearch.internal + */ +public class QueryInsightsService extends AbstractLifecycleComponent { + /** + * The internal OpenSearch thread pool that executes async processing and exporting tasks + */ + private final ThreadPool threadPool; + + /** + * Services to capture top n queries for different metric types + */ + private final Map<MetricType, TopQueriesService> topQueriesServices; + + /** + * Flags for enabling insight data collection for different metric types + */ + private final Map<MetricType, Boolean> enableCollect; + + /** + * The internal thread-safe queue to ingest the search query data and subsequently forward to processors + */ + private final LinkedBlockingQueue<SearchQueryRecord> queryRecordsQueue; + + /** + * Holds a reference to the delayed operation {@link Scheduler.Cancellable} so it can be cancelled when + * the service is closed concurrently. + */ + protected volatile Scheduler.Cancellable scheduledFuture; + + /** + * Constructor of the QueryInsightsService + * + * @param threadPool The OpenSearch thread pool to run async tasks + */ + @Inject + public QueryInsightsService(final ThreadPool threadPool) { + enableCollect = new HashMap<>(); + queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY); + topQueriesServices = new HashMap<>(); + for (MetricType metricType : MetricType.allMetricTypes()) { + enableCollect.put(metricType, false); + topQueriesServices.put(metricType, new TopQueriesService(metricType)); + } + this.threadPool = threadPool; + } + + /** + * Ingest the query data into in-memory stores + * + * @param record the record to ingest + * @return whether the record was successfully queued for processing + */ + public boolean addRecord(final SearchQueryRecord record) { + boolean shouldAdd = false; + for (Map.Entry<MetricType, TopQueriesService> entry : topQueriesServices.entrySet()) { + if (!enableCollect.get(entry.getKey())) { + continue; + } + List<SearchQueryRecord> currentSnapshot = entry.getValue().getTopQueriesCurrentSnapshot(); + // skip adding to the top N queries store if the incoming record is smaller than the current Nth record + if (currentSnapshot.size() < entry.getValue().getTopNSize() + || SearchQueryRecord.compare(record, currentSnapshot.get(0), entry.getKey()) > 0) { + shouldAdd = true; + break; + } + } + if (shouldAdd) { + return queryRecordsQueue.offer(record); + } + return false; + } + + /** + * Drain the queryRecordsQueue into internal stores and services + */ + public void drainRecords() { + final List<SearchQueryRecord> records = new ArrayList<>(); + queryRecordsQueue.drainTo(records); + records.sort(Comparator.comparingLong(SearchQueryRecord::getTimestamp)); + for (MetricType metricType : MetricType.allMetricTypes()) { + if (enableCollect.get(metricType)) { + // ingest the records into topQueriesService +
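// (records were sorted by timestamp above, so each service sees them in order and rotates its window at most once per drain) +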
topQueriesServices.get(metricType).consumeRecords(records); + } + } + } + + /** + * Get the top queries service based on metricType + * @param metricType {@link MetricType} + * @return {@link TopQueriesService} + */ + public TopQueriesService getTopQueriesService(final MetricType metricType) { + return topQueriesServices.get(metricType); + } + + /** + * Set flag to enable or disable Query Insights data collection + * + * @param metricType {@link MetricType} + * @param enable Flag to enable or disable Query Insights data collection + */ + public void enableCollection(final MetricType metricType, final boolean enable) { + this.enableCollect.put(metricType, enable); + this.topQueriesServices.get(metricType).setEnabled(enable); + } + + /** + * Get if the Query Insights data collection is enabled for a MetricType + * + * @param metricType {@link MetricType} + * @return if the Query Insights data collection is enabled + */ + public boolean isCollectionEnabled(final MetricType metricType) { + return this.enableCollect.get(metricType); + } + + /** + * Check if query insights service is enabled + * + * @return if query insights service is enabled + */ + public boolean isEnabled() { + for (MetricType t : MetricType.allMetricTypes()) { + if (isCollectionEnabled(t)) { + return true; + } + } + return false; + } + + @Override + protected void doStart() { + if (isEnabled()) { + scheduledFuture = threadPool.scheduleWithFixedDelay( + this::drainRecords, + QueryInsightsSettings.QUERY_RECORD_QUEUE_DRAIN_INTERVAL, + QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR + ); + } + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java new file mode 100644 index 0000000000000..d2c30cbdf98e7 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java @@ -0,0 +1,282 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.PriorityQueue; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Service responsible for gathering and storing top N queries + * with high latency or resource usage + * + * @opensearch.internal + */ +public class TopQueriesService { + private boolean enabled; + /** + * The metric type to measure top n queries + */ + private final MetricType metricType; + private int topNSize; + /** + * The window size to keep the top n queries + */ + private TimeValue windowSize; + /** + * The current window start timestamp + */ + private long windowStart; + /** + * The internal store that holds the top n queries insight data. The bounded priority queue is not + * itself thread-safe: it is mutated only by the scheduled drain task, and readers consume the + * snapshots published through the AtomicReferences below. + */ + private final PriorityQueue<SearchQueryRecord> topQueriesStore; + + /** + * The AtomicReference of a snapshot of the current window top queries for getters to consume + */ + private final AtomicReference<List<SearchQueryRecord>> topQueriesCurrentSnapshot; + + /** + * The AtomicReference of a snapshot of the last window top queries for getters to consume + */ + private final AtomicReference<List<SearchQueryRecord>> topQueriesHistorySnapshot; + + TopQueriesService(final MetricType metricType) { + this.enabled = false; + this.metricType = metricType; + this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE; + this.windowSize = QueryInsightsSettings.DEFAULT_WINDOW_SIZE; + this.windowStart = -1L; + topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType)); + topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>()); + topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>()); + } + + /** + * Set the top N size for the top queries service.
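+ * A smaller size takes effect lazily: the store is only trimmed down to the new size on the next consumeRecords pass.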
+ * + * @param topNSize the top N size to set + */ + public void setTopNSize(final int topNSize) { + this.topNSize = topNSize; + } + + /** + * Get the current configured top n size + * + * @return top n size + */ + public int getTopNSize() { + return topNSize; + } + + /** + * Validate the top N size based on the internal constraints + * + * @param size the wanted top N size + */ + public void validateTopNSize(final int size) { + if (size > QueryInsightsSettings.MAX_N_SIZE) { + throw new IllegalArgumentException( + "Top N size setting for [" + + metricType + + "]" + + " should be smaller than max top N size [" + + QueryInsightsSettings.MAX_N_SIZE + + "], was (" + + size + + " > " + + QueryInsightsSettings.MAX_N_SIZE + + ")" + ); + } + } + + /** + * Set enable flag for the service + * @param enabled boolean + */ + public void setEnabled(final boolean enabled) { + this.enabled = enabled; + } + + /** + * Set the window size for the top N queries service + * + * @param windowSize window size to set + */ + public void setWindowSize(final TimeValue windowSize) { + this.windowSize = windowSize; + // reset the window start time since the window size has changed + this.windowStart = -1L; + } + + /** + * Validate if the window size is valid, based on internal constraints. + * + * @param windowSize the window size to validate + */ + public void validateWindowSize(final TimeValue windowSize) { + if (windowSize.compareTo(QueryInsightsSettings.MAX_WINDOW_SIZE) > 0 + || windowSize.compareTo(QueryInsightsSettings.MIN_WINDOW_SIZE) < 0) { + throw new IllegalArgumentException( + "Window size setting for [" + + metricType + + "]" + + " should be between [" + + QueryInsightsSettings.MIN_WINDOW_SIZE + + "," + + QueryInsightsSettings.MAX_WINDOW_SIZE + + "], was (" + + windowSize + + ")" + ); + } + if (!(QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES.contains(windowSize) || windowSize.getMinutes() % 60 == 0)) { + throw new IllegalArgumentException( + "Window size setting for [" + + metricType + + "]" + + " should be a multiple of 1 hour, or one of " + + QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES + + ", was (" + + windowSize + + ")" + ); + } + } + + /** + * Get all top queries records that are in the current top n queries store. + * Optionally include top N records from the last window. + + * By default, return the records in sorted order.
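+ * Sorting is descending by the measured metric value, so the slowest (or most expensive) queries come first.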
+     *
+     * @param includeLastWindow if the top N queries from the last window should be included
+     * @return List of the records that are in the query insight store
+     * @throws IllegalArgumentException if query insight is disabled in the cluster
+     */
+    public List<SearchQueryRecord> getTopQueriesRecords(final boolean includeLastWindow) throws IllegalArgumentException {
+        if (!enabled) {
+            throw new IllegalArgumentException(
+                String.format(Locale.ROOT, "Cannot get top n queries for [%s] when it is not enabled.", metricType.toString())
+            );
+        }
+        // read from window snapshots
+        final List<SearchQueryRecord> queries = new ArrayList<>(topQueriesCurrentSnapshot.get());
+        if (includeLastWindow) {
+            queries.addAll(topQueriesHistorySnapshot.get());
+        }
+        return Stream.of(queries)
+            .flatMap(Collection::stream)
+            .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1)
+            .collect(Collectors.toList());
+    }
+
+    /**
+     * Consume records to top queries stores
+     *
+     * @param records a list of {@link SearchQueryRecord}
+     */
+    void consumeRecords(final List<SearchQueryRecord> records) {
+        final long currentWindowStart = calculateWindowStart(System.currentTimeMillis());
+        List<SearchQueryRecord> recordsInLastWindow = new ArrayList<>();
+        List<SearchQueryRecord> recordsInThisWindow = new ArrayList<>();
+        for (SearchQueryRecord record : records) {
+            // skip the records that do not have the corresponding measurement
+            if (!record.getMeasurements().containsKey(metricType)) {
+                continue;
+            }
+            if (record.getTimestamp() < currentWindowStart) {
+                recordsInLastWindow.add(record);
+            } else {
+                recordsInThisWindow.add(record);
+            }
+        }
+        // add records in last window, if there are any, to the top n store
+        addToTopNStore(recordsInLastWindow);
+        // rotate window and reset window start if necessary
+        rotateWindowIfNecessary(currentWindowStart);
+        // add records in current window, if there are any, to the top n store
+        addToTopNStore(recordsInThisWindow);
+        // update the current window snapshot for getters to consume
+        final List<SearchQueryRecord> newSnapShot = new ArrayList<>(topQueriesStore);
+        newSnapShot.sort((a, b) -> SearchQueryRecord.compare(a, b, metricType));
+        topQueriesCurrentSnapshot.set(newSnapShot);
+    }
+
+    private void addToTopNStore(final List<SearchQueryRecord> records) {
+        topQueriesStore.addAll(records);
+        // evict the smallest elements to keep the priority queue at the fixed top N size
+        while (topQueriesStore.size() > topNSize) {
+            topQueriesStore.poll();
+        }
+    }
+
+    /**
+     * Reset the current window and rotate the data to the history snapshot for top n queries.
+     * This function is invoked at most once per consumeRecords call.
+     *
+     * @param newWindowStart the new windowStart to set to
+     */
+    private void rotateWindowIfNecessary(final long newWindowStart) {
+        // reset window if the current window is outdated
+        if (windowStart < newWindowStart) {
+            final List<SearchQueryRecord> history = new ArrayList<>();
+            // rotate the current window to history store only if the data belongs to the last window
+            if (windowStart == newWindowStart - windowSize.getMillis()) {
+                history.addAll(topQueriesStore);
+            }
+            topQueriesHistorySnapshot.set(history);
+            topQueriesStore.clear();
+            topQueriesCurrentSnapshot.set(new ArrayList<>());
+            windowStart = newWindowStart;
+        }
+    }
+
+    /**
+     * Calculate the window start for the given timestamp
+     *
+     * @param timestamp the given timestamp to calculate window start
+     * @return the start timestamp, in milliseconds, of the window containing the given timestamp
+     */
+    private long calculateWindowStart(final long timestamp) {
+        final LocalDateTime currentTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.of("UTC"));
+        LocalDateTime windowStartTime = currentTime.truncatedTo(ChronoUnit.HOURS);
+        while (!windowStartTime.plusMinutes(windowSize.getMinutes()).isAfter(currentTime)) {
+            windowStartTime = windowStartTime.plusMinutes(windowSize.getMinutes());
+        }
+        return windowStartTime.toInstant(ZoneOffset.UTC).getEpochSecond() * 1000;
+    }
+
+    /**
+     * Get the current top queries snapshot from the AtomicReference.
+     *
+     * @return a list of {@link SearchQueryRecord}
+     */
+    public List<SearchQueryRecord> getTopQueriesCurrentSnapshot() {
+        return topQueriesCurrentSnapshot.get();
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java
new file mode 100644
index 0000000000000..5068f28234f6d
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Service Classes for Query Insights
+ */
+package org.opensearch.plugin.insights.core.service;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java
new file mode 100644
index 0000000000000..04d1f9bfff7e1
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Base Package of Query Insights
+ */
+package org.opensearch.plugin.insights;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java
new file mode 100644
index 0000000000000..9b6b5856f7d27
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Transport Actions, Requests and Responses for Query Insights
+ */
+package org.opensearch.plugin.insights.rules.action;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java
new file mode 100644
index 0000000000000..26cff82aae52e
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java
@@ -0,0 +1,77 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
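
Editor's note: `calculateWindowStart` above aligns every window to the UTC hour and then steps forward in whole windows until it finds the window containing the timestamp. A self-contained sketch of that alignment (assuming a window size given in minutes; names are illustrative):

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

// Sketch of the window alignment above: truncate to the UTC hour, then
// step forward in whole windows until the next step would pass the given
// timestamp. The result is the start of the window containing it.
final class WindowStart {
    static long calculate(long timestampMillis, long windowMinutes) {
        LocalDateTime now = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestampMillis), ZoneOffset.UTC);
        LocalDateTime start = now.truncatedTo(ChronoUnit.HOURS);
        while (!start.plusMinutes(windowMinutes).isAfter(now)) {
            start = start.plusMinutes(windowMinutes);
        }
        return start.toInstant(ZoneOffset.UTC).toEpochMilli();
    }

    public static void main(String[] args) {
        // 10:07 UTC with 5-minute windows falls in the [10:05, 10:10) window.
        long t = LocalDateTime.of(2024, 1, 1, 10, 7).toInstant(ZoneOffset.UTC).toEpochMilli();
        System.out.println(Instant.ofEpochMilli(calculate(t, 5))); // 2024-01-01T10:05:00Z
    }
}
```
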
+ */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.io.IOException; +import java.util.List; + +/** + * Holds all top queries records by resource usage or latency on a node + * Mainly used in the top N queries node response workflow. + * + * @opensearch.internal + */ +public class TopQueries extends BaseNodeResponse implements ToXContentObject { + /** The store to keep the top queries records */ + private final List<SearchQueryRecord> topQueriesRecords; + + /** + * Create the TopQueries Object from StreamInput + * @param in A {@link StreamInput} object. + * @throws IOException IOException + */ + public TopQueries(final StreamInput in) throws IOException { + super(in); + topQueriesRecords = in.readList(SearchQueryRecord::new); + } + + /** + * Create the TopQueries Object + * @param node A node that is part of the cluster. + * @param searchQueryRecords A list of SearchQueryRecord associated in this TopQueries. + */ + public TopQueries(final DiscoveryNode node, final List<SearchQueryRecord> searchQueryRecords) { + super(node); + topQueriesRecords = searchQueryRecords; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + if (topQueriesRecords != null) { + for (SearchQueryRecord record : topQueriesRecords) { + record.toXContent(builder, params); + } + } + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(topQueriesRecords); + + } + + /** + * Get all top queries records + * + * @return the top queries records in this node response + */ + public List<SearchQueryRecord> getTopQueriesRecord() { + return topQueriesRecords; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java new file mode 100644 index 0000000000000..b8ed69fa5692b --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.ActionType; + +/** + * Transport action for cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesAction extends ActionType<TopQueriesResponse> { + + /** + * The TopQueriesAction Instance. 
+ */ + public static final TopQueriesAction INSTANCE = new TopQueriesAction(); + /** + * The name of this Action + */ + public static final String NAME = "cluster:admin/opensearch/insights/top_queries"; + + private TopQueriesAction() { + super(NAME, TopQueriesResponse::new); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java new file mode 100644 index 0000000000000..3bdff2c403161 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.plugin.insights.rules.model.MetricType; + +import java.io.IOException; + +/** + * A request to get cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesRequest extends BaseNodesRequest<TopQueriesRequest> { + + final MetricType metricType; + + /** + * Constructor for TopQueriesRequest + * + * @param in A {@link StreamInput} object. + * @throws IOException if the stream cannot be deserialized. + */ + public TopQueriesRequest(final StreamInput in) throws IOException { + super(in); + this.metricType = MetricType.readFromStream(in); + } + + /** + * Get top queries from nodes based on the nodes ids specified. + * If none are passed, cluster level top queries will be returned. + * + * @param metricType {@link MetricType} + * @param nodesIds the nodeIds specified in the request + */ + public TopQueriesRequest(final MetricType metricType, final String... nodesIds) { + super(nodesIds); + this.metricType = metricType; + } + + /** + * Get the type of requested metrics + */ + public MetricType getMetricType() { + return metricType; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(metricType.toString()); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java new file mode 100644 index 0000000000000..2e66bb7f77baf --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java @@ -0,0 +1,143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
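
Editor's note: `TopQueries`, `TopQueriesRequest`, and the response type below all follow OpenSearch's `Writeable` convention, where a `writeTo(StreamOutput)` method is paired with a `StreamInput` constructor. A hedged, test-style round-trip sketch (assuming the OpenSearch test classpath; `BytesStreamOutput` is OpenSearch's in-memory stream):

```java
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest;

// Hedged round-trip sketch: write the request to an in-memory stream and
// read it back, as the transport layer does between nodes.
final class RequestRoundTrip {
    static TopQueriesRequest roundTrip(final TopQueriesRequest request) throws java.io.IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            request.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return new TopQueriesRequest(in); // same metric type and node ids
            }
        }
    }
}
```
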
+ */
+
+package org.opensearch.plugin.insights.rules.action.top_queries;
+
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.nodes.BaseNodesResponse;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.plugin.insights.rules.model.Attribute;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Transport response for cluster/node level top queries information.
+ *
+ * @opensearch.internal
+ */
+public class TopQueriesResponse extends BaseNodesResponse<TopQueries> implements ToXContentFragment {
+
+    private static final String CLUSTER_LEVEL_RESULTS_KEY = "top_queries";
+    private final MetricType metricType;
+    private final int top_n_size;
+
+    /**
+     * Constructor for TopQueriesResponse.
+     *
+     * @param in A {@link StreamInput} object.
+     * @throws IOException if the stream cannot be deserialized.
+     */
+    public TopQueriesResponse(final StreamInput in) throws IOException {
+        super(in);
+        top_n_size = in.readInt();
+        metricType = in.readEnum(MetricType.class);
+    }
+
+    /**
+     * Constructor for TopQueriesResponse
+     *
+     * @param clusterName The current cluster name
+     * @param nodes A list that contains top queries results from all nodes
+     * @param failures A list that contains FailedNodeException
+     * @param top_n_size The top N size to return to the user
+     * @param metricType the {@link MetricType} to be returned in this response
+     */
+    public TopQueriesResponse(
+        final ClusterName clusterName,
+        final List<TopQueries> nodes,
+        final List<FailedNodeException> failures,
+        final int top_n_size,
+        final MetricType metricType
+    ) {
+        super(clusterName, nodes, failures);
+        this.top_n_size = top_n_size;
+        this.metricType = metricType;
+    }
+
+    @Override
+    protected List<TopQueries> readNodesFrom(final StreamInput in) throws IOException {
+        return in.readList(TopQueries::new);
+    }
+
+    @Override
+    protected void writeNodesTo(final StreamOutput out, final List<TopQueries> nodes) throws IOException {
+        out.writeList(nodes);
+        // write as int to match readInt in the StreamInput constructor
+        out.writeInt(top_n_size);
+        out.writeEnum(metricType);
+    }
+
+    @Override
+    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+        final List<TopQueries> results = getNodes();
+        postProcess(results);
+        builder.startObject();
+        toClusterLevelResult(builder, params, results);
+        return builder.endObject();
+    }
+
+    @Override
+    public String toString() {
+        try {
+            final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            // toXContent already opens and closes the top-level object
+            this.toXContent(builder, EMPTY_PARAMS);
+            return builder.toString();
+        } catch (IOException e) {
+            return "{ \"error\" : \"" + e.getMessage() + "\"}";
+        }
+    }
+
+    /**
+     * Post process the top queries results to add customized attributes
+     *
+     * @param results the top queries results
+     */
+    private void postProcess(final List<TopQueries> results) {
+        for (TopQueries topQueries : results) {
+            final String nodeId = topQueries.getNode().getId();
+            for (SearchQueryRecord record : topQueries.getTopQueriesRecord()) {
+                record.addAttribute(Attribute.NODE_ID, nodeId);
+            }
+        }
+    }
+
+    /**
+     * Merge top n queries results from nodes into cluster level results in XContent format.
+     *
+     * @param builder XContent builder
+     * @param params serialization parameters
+     * @param results top queries results from all nodes
+     * @throws IOException if an error occurs
+     */
+    private void toClusterLevelResult(final XContentBuilder builder, final Params params, final List<TopQueries> results)
+        throws IOException {
+        final List<SearchQueryRecord> allRecords = results.stream()
+            .map(TopQueries::getTopQueriesRecord)
+            .flatMap(Collection::stream)
+            .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1)
+            .limit(top_n_size)
+            .collect(Collectors.toList());
+        builder.startArray(CLUSTER_LEVEL_RESULTS_KEY);
+        for (SearchQueryRecord record : allRecords) {
+            record.toXContent(builder, params);
+        }
+        builder.endArray();
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java
similarity index 55%
rename from server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java
rename to plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java
index 04c0825787b66..3cc7900e5ce7d
--- a/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java
@@ -6,15 +6,7 @@
  * compatible open source license.
  */
 
-package org.opensearch.common.cache.store.enums;
-
 /**
- * Cache store types in tiered cache.
- *
- * @opensearch.internal
+ * Transport Actions, Requests and Responses for Top N Queries
  */
-public enum CacheStoreType {
-
-    ON_HEAP,
-    DISK;
-}
+package org.opensearch.plugin.insights.rules.action.top_queries;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
new file mode 100644
index 0000000000000..c1d17edf9ff14
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
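
Editor's note: `toClusterLevelResult` above implements the cluster-level merge: flatten the per-node record lists, sort descending by the tracked metric, and keep only the top N across the whole cluster. A minimal sketch of that stream pipeline, with plain `Long` latencies standing in for records:

```java
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

// Sketch of the cluster-level merge: flatten the per-node lists, sort
// descending, and keep only the global top N.
final class ClusterMerge {
    static List<Long> topN(List<List<Long>> perNodeLatencies, int n) {
        return perNodeLatencies.stream()
            .flatMap(List::stream)
            .sorted(Comparator.reverseOrder())
            .limit(n)
            .collect(Collectors.toList());
    }
}
```
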
+ */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Locale; + +/** + * Valid attributes for a search query record + * + * @opensearch.internal + */ +public enum Attribute { + /** + * The search query type + */ + SEARCH_TYPE, + /** + * The search query source + */ + SOURCE, + /** + * Total shards queried + */ + TOTAL_SHARDS, + /** + * The indices involved + */ + INDICES, + /** + * The per phase level latency map for a search query + */ + PHASE_LATENCY_MAP, + /** + * The node id for this request + */ + NODE_ID; + + /** + * Read an Attribute from a StreamInput + * + * @param in the StreamInput to read from + * @return Attribute + * @throws IOException IOException + */ + static Attribute readFromStream(final StreamInput in) throws IOException { + return Attribute.valueOf(in.readString().toUpperCase(Locale.ROOT)); + } + + /** + * Write Attribute to a StreamOutput + * + * @param out the StreamOutput to write + * @param attribute the Attribute to write + * @throws IOException IOException + */ + static void writeTo(final StreamOutput out, final Attribute attribute) throws IOException { + out.writeString(attribute.toString()); + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java new file mode 100644 index 0000000000000..cdd090fbf4804 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
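
Editor's note: the `Attribute` enum above uses a wire convention of writing the lowercase name (`toString`) and parsing it back case-insensitively (`valueOf` after upper-casing). A tiny standalone sketch of that round-trip convention (illustrative enum, not part of the plugin):

```java
import java.util.Locale;

// Sketch of the enum round-trip convention: values are written in
// lowercase (toString) and parsed back case-insensitively via valueOf.
enum Color {
    RED, GREEN;

    static Color fromWire(String s) {
        return Color.valueOf(s.toUpperCase(Locale.ROOT));
    }

    @Override
    public String toString() {
        return name().toLowerCase(Locale.ROOT);
    }

    public static void main(String[] args) {
        Color c = Color.fromWire(Color.GREEN.toString()); // round-trips
        System.out.println(c); // prints "green"
    }
}
```
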
+ */
+
+package org.opensearch.plugin.insights.rules.model;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Valid metric types for a search query record
+ *
+ * @opensearch.internal
+ */
+public enum MetricType implements Comparator<Number> {
+    /**
+     * Latency metric type
+     */
+    LATENCY,
+    /**
+     * CPU usage metric type
+     */
+    CPU,
+    /**
+     * JVM heap usage metric type
+     */
+    JVM;
+
+    /**
+     * Read a MetricType from a StreamInput
+     *
+     * @param in the StreamInput to read from
+     * @return MetricType
+     * @throws IOException IOException
+     */
+    public static MetricType readFromStream(final StreamInput in) throws IOException {
+        return fromString(in.readString());
+    }
+
+    /**
+     * Create MetricType from String
+     *
+     * @param metricType the String representation of MetricType
+     * @return MetricType
+     */
+    public static MetricType fromString(final String metricType) {
+        return MetricType.valueOf(metricType.toUpperCase(Locale.ROOT));
+    }
+
+    /**
+     * Write MetricType to a StreamOutput
+     *
+     * @param out the StreamOutput to write
+     * @param metricType the MetricType to write
+     * @throws IOException IOException
+     */
+    static void writeTo(final StreamOutput out, final MetricType metricType) throws IOException {
+        out.writeString(metricType.toString());
+    }
+
+    @Override
+    public String toString() {
+        return this.name().toLowerCase(Locale.ROOT);
+    }
+
+    /**
+     * Get all valid metrics
+     *
+     * @return A set that contains all valid MetricType values
+     */
+    public static Set<MetricType> allMetricTypes() {
+        return Arrays.stream(values()).collect(Collectors.toSet());
+    }
+
+    /**
+     * Compare two numbers based on the metric type
+     *
+     * @param a the first Number to be compared.
+     * @param b the second Number to be compared.
+     * @return a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second
+     */
+    public int compare(final Number a, final Number b) {
+        switch (this) {
+            case LATENCY:
+                return Long.compare(a.longValue(), b.longValue());
+            case JVM:
+            case CPU:
+                return Double.compare(a.doubleValue(), b.doubleValue());
+        }
+        return -1;
+    }
+
+    /**
+     * Parse a value with the correct type based on MetricType
+     *
+     * @param o the generic object to parse
+     * @return {@link Number}
+     */
+    Number parseValue(final Object o) {
+        switch (this) {
+            case LATENCY:
+                return (Long) o;
+            case JVM:
+            case CPU:
+                return (Double) o;
+            default:
+                return (Number) o;
+        }
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
new file mode 100644
index 0000000000000..060711edb5580
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
@@ -0,0 +1,176 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
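
Editor's note: because `MetricType` implements `Comparator<Number>`, each constant orders raw measurements with the numeric width appropriate to that metric (latency as `long`, CPU/JVM as `double`). A hedged usage sketch (assumes the plugin classes on the classpath):

```java
import org.opensearch.plugin.insights.rules.model.MetricType;

final class MetricCompareDemo {
    public static void main(String[] args) {
        // LATENCY compares long values, CPU/JVM compare doubles.
        System.out.println(MetricType.LATENCY.compare(120L, 45L) > 0);  // true: 120ms > 45ms
        System.out.println(MetricType.CPU.compare(0.25d, 0.75d) < 0);   // true: 25% < 75%
    }
}
```
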
+ */
+
+package org.opensearch.plugin.insights.rules.model;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * SearchQueryRecord represents a minimal atomic record stored in the Query Insight Framework,
+ * which contains extensive information related to a search query.
+ *
+ * @opensearch.internal
+ */
+public class SearchQueryRecord implements ToXContentObject, Writeable {
+    private final long timestamp;
+    private final Map<MetricType, Number> measurements;
+    private final Map<Attribute, Object> attributes;
+
+    /**
+     * Constructor of SearchQueryRecord
+     *
+     * @param in the StreamInput to read the SearchQueryRecord from
+     * @throws IOException IOException
+     * @throws ClassCastException ClassCastException
+     */
+    public SearchQueryRecord(final StreamInput in) throws IOException, ClassCastException {
+        this.timestamp = in.readLong();
+        measurements = new HashMap<>();
+        in.readMap(MetricType::readFromStream, StreamInput::readGenericValue)
+            .forEach(((metricType, o) -> measurements.put(metricType, metricType.parseValue(o))));
+        this.attributes = in.readMap(Attribute::readFromStream, StreamInput::readGenericValue);
+    }
+
+    /**
+     * Constructor of SearchQueryRecord
+     *
+     * @param timestamp The timestamp of the query.
+     * @param measurements A map of MetricType to measurement values associated with this query
+     * @param attributes A map of Attribute to values associated with this query
+     */
+    public SearchQueryRecord(final long timestamp, Map<MetricType, Number> measurements, final Map<Attribute, Object> attributes) {
+        if (measurements == null) {
+            throw new IllegalArgumentException("Measurements cannot be null");
+        }
+        this.measurements = measurements;
+        this.attributes = attributes;
+        this.timestamp = timestamp;
+    }
+
+    /**
+     * Returns the observation time of the metric.
+     *
+     * @return the observation time in milliseconds
+     */
+    public long getTimestamp() {
+        return timestamp;
+    }
+
+    /**
+     * Returns the measurement associated with the specified name.
+     *
+     * @param name the name of the measurement
+     * @return the measurement object, or null if not found
+     */
+    public Number getMeasurement(final MetricType name) {
+        return measurements.get(name);
+    }
+
+    /**
+     * Returns a map of all the measurements associated with the metric.
+     *
+     * @return a map of measurement names to measurement objects
+     */
+    public Map<MetricType, Number> getMeasurements() {
+        return measurements;
+    }
+
+    /**
+     * Returns a map of the attributes associated with the metric.
+     *
+     * @return a map of attribute keys to attribute values
+     */
+    public Map<Attribute, Object> getAttributes() {
+        return attributes;
+    }
+
+    /**
+     * Add an attribute to this record
+     *
+     * @param attribute attribute to add
+     * @param value the value associated with the attribute
+     */
+    public void addAttribute(final Attribute attribute, final Object value) {
+        attributes.put(attribute, value);
+    }
+
+    @Override
+    public XContentBuilder toXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException {
+        builder.startObject();
+        builder.field("timestamp", timestamp);
+        for (Map.Entry<Attribute, Object> entry : attributes.entrySet()) {
+            builder.field(entry.getKey().toString(), entry.getValue());
+        }
+        for (Map.Entry<MetricType, Number> entry : measurements.entrySet()) {
+            builder.field(entry.getKey().toString(), entry.getValue());
+        }
+        return builder.endObject();
+    }
+
+    /**
+     * Write a SearchQueryRecord to a StreamOutput
+     *
+     * @param out the StreamOutput to write
+     * @throws IOException IOException
+     */
+    @Override
+    public void writeTo(final StreamOutput out) throws IOException {
+        out.writeLong(timestamp);
+        out.writeMap(measurements, (stream, metricType) -> MetricType.writeTo(out, metricType), StreamOutput::writeGenericValue);
+        out.writeMap(attributes, (stream, attribute) -> Attribute.writeTo(out, attribute), StreamOutput::writeGenericValue);
+    }
+
+    /**
+     * Compare two SearchQueryRecord, based on the given MetricType
+     *
+     * @param a the first SearchQueryRecord to compare
+     * @param b the second SearchQueryRecord to compare
+     * @param metricType the MetricType to compare on
+     * @return 0 if the first SearchQueryRecord is numerically equal to the second SearchQueryRecord;
+     *        -1 if the first SearchQueryRecord is numerically less than the second SearchQueryRecord;
+     *         1 if the first SearchQueryRecord is numerically greater than the second SearchQueryRecord.
+     */
+    public static int compare(final SearchQueryRecord a, final SearchQueryRecord b, final MetricType metricType) {
+        return metricType.compare(a.getMeasurement(metricType), b.getMeasurement(metricType));
+    }
+
+    /**
+     * Check if a SearchQueryRecord is equal to another record.
+     * Equality checks the timestamp and the measurements; attributes are only
+     * compared by size, since attribute values may hold arrays and nested maps.
+     *
+     * @param o the other SearchQueryRecord record
+     * @return true if the two records are equal under the rule above, false otherwise.
+     */
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof SearchQueryRecord)) {
+            return false;
+        }
+        final SearchQueryRecord other = (SearchQueryRecord) o;
+        return timestamp == other.getTimestamp()
+            && measurements.equals(other.getMeasurements())
+            && attributes.size() == other.getAttributes().size();
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(timestamp, measurements, attributes);
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java
new file mode 100644
index 0000000000000..c59ec1550f54b
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
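
Editor's note: the static `SearchQueryRecord.compare` above is the single ordering primitive used throughout the plugin; negating it yields the descending order the service and response code use. A hedged usage sketch (assumes the plugin classes on the classpath; values are illustrative):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;

final class SortDemo {
    public static void main(String[] args) {
        // Two records with latency measurements, sorted descending via the
        // static comparator, mirroring the service and response code.
        List<SearchQueryRecord> records = new ArrayList<>(List.of(
            new SearchQueryRecord(1L, Map.of(MetricType.LATENCY, 200L), Map.of()),
            new SearchQueryRecord(2L, Map.of(MetricType.LATENCY, 900L), Map.of())
        ));
        records.sort((a, b) -> SearchQueryRecord.compare(a, b, MetricType.LATENCY) * -1);
        System.out.println(records.get(0).getMeasurement(MetricType.LATENCY)); // 900
    }
}
```
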
+ */ + +/** + * Data Models for Query Insight Records + */ +package org.opensearch.plugin.insights.rules.model; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java new file mode 100644 index 0000000000000..3787f05f65552 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Query Insights + */ +package org.opensearch.plugin.insights.rules.resthandler; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java new file mode 100644 index 0000000000000..6aa511c626ab1 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestResponseListener; + +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_QUERIES_BASE_URI; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Rest action to get Top N queries by certain metric type + * + * @opensearch.api + */ +public class RestTopQueriesAction extends BaseRestHandler { + /** The metric types that are allowed in top N queries */ + static final Set<String> ALLOWED_METRICS = MetricType.allMetricTypes().stream().map(MetricType::toString).collect(Collectors.toSet()); + + /** + * Constructor for RestTopQueriesAction + */ + public RestTopQueriesAction() {} + + @Override + public List<Route> routes() { + return List.of( + new Route(GET, TOP_QUERIES_BASE_URI), + new Route(GET, String.format(Locale.ROOT, "%s/{nodeId}", TOP_QUERIES_BASE_URI)) + ); + } + + @Override + public String getName() { + return "query_insights_top_queries_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + 
final TopQueriesRequest topQueriesRequest = prepareRequest(request); + topQueriesRequest.timeout(request.param("timeout")); + + return channel -> client.execute(TopQueriesAction.INSTANCE, topQueriesRequest, topQueriesResponse(channel)); + } + + static TopQueriesRequest prepareRequest(final RestRequest request) { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + final String metricType = request.param("type", MetricType.LATENCY.toString()); + if (!ALLOWED_METRICS.contains(metricType)) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "request [%s] contains invalid metric type [%s]", request.path(), metricType) + ); + } + return new TopQueriesRequest(MetricType.fromString(metricType), nodesIds); + } + + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } + + private RestResponseListener<TopQueriesResponse> topQueriesResponse(final RestChannel channel) { + return new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final TopQueriesResponse response) throws Exception { + return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); + } + }; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java new file mode 100644 index 0000000000000..087cf7d765f8c --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Top N Queries + */ +package org.opensearch.plugin.insights.rules.resthandler.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java new file mode 100644 index 0000000000000..f3a1c70b9af57 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions for Query Insights. + */ +package org.opensearch.plugin.insights.rules.transport; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java new file mode 100644 index 0000000000000..ddf614211bc41 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
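
Editor's note: `prepareRequest` above fails fast on unknown metric types before dispatching the transport action. A standalone sketch of that check (the real handler derives the allowed set from `MetricType.allMetricTypes()`; the class here is illustrative):

```java
import java.util.Locale;
import java.util.Set;

// Fail-fast parameter validation, mirroring prepareRequest above.
final class MetricParamCheck {
    static final Set<String> ALLOWED_METRICS = Set.of("latency", "cpu", "jvm");

    static String validate(final String path, final String type) {
        if (!ALLOWED_METRICS.contains(type)) {
            throw new IllegalArgumentException(
                String.format(Locale.ROOT, "request [%s] contains invalid metric type [%s]", path, type)
            );
        }
        return type;
    }
}
```

With the handler registered, the endpoint would be exercised as GET /_insights/top_queries (the `type` parameter defaults to `latency`), or scoped to specific nodes via /_insights/top_queries/{nodeId}.
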
+ */
+
+package org.opensearch.plugin.insights.rules.transport.top_queries;
+
+import org.opensearch.OpenSearchException;
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.nodes.TransportNodesAction;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.plugin.insights.core.service.QueryInsightsService;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Transport action for cluster/node level top queries information.
+ *
+ * @opensearch.internal
+ */
+public class TransportTopQueriesAction extends TransportNodesAction<
+    TopQueriesRequest,
+    TopQueriesResponse,
+    TransportTopQueriesAction.NodeRequest,
+    TopQueries> {
+
+    private final QueryInsightsService queryInsightsService;
+
+    /**
+     * Create the TransportTopQueriesAction Object
+     *
+     * @param threadPool The OpenSearch thread pool to run async tasks
+     * @param clusterService The clusterService of this node
+     * @param transportService The TransportService of this node
+     * @param queryInsightsService The QueryInsightsService associated with this Transport Action
+     * @param actionFilters the action filters
+     */
+    @Inject
+    public TransportTopQueriesAction(
+        final ThreadPool threadPool,
+        final ClusterService clusterService,
+        final TransportService transportService,
+        final QueryInsightsService queryInsightsService,
+        final ActionFilters actionFilters
+    ) {
+        super(
+            TopQueriesAction.NAME,
+            threadPool,
+            clusterService,
+            transportService,
+            actionFilters,
+            TopQueriesRequest::new,
+            NodeRequest::new,
+            ThreadPool.Names.GENERIC,
+            TopQueries.class
+        );
+        this.queryInsightsService = queryInsightsService;
+    }
+
+    @Override
+    protected TopQueriesResponse newResponse(
+        final TopQueriesRequest topQueriesRequest,
+        final List<TopQueries> responses,
+        final List<FailedNodeException> failures
+    ) {
+        if (topQueriesRequest.getMetricType() == MetricType.LATENCY) {
+            return new TopQueriesResponse(
+                clusterService.getClusterName(),
+                responses,
+                failures,
+                clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE),
+                MetricType.LATENCY
+            );
+        } else {
+            throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", topQueriesRequest.getMetricType()));
+        }
+    }
+
+    @Override
+    protected NodeRequest newNodeRequest(final TopQueriesRequest request) {
+        return new NodeRequest(request);
+    }
+
+    @Override
+    protected TopQueries newNodeResponse(final StreamInput in) throws IOException {
+        return new TopQueries(in);
+    }
+
+    @Override
+    protected TopQueries nodeOperation(final NodeRequest nodeRequest) {
+        final TopQueriesRequest request = nodeRequest.request;
+        if (request.getMetricType() == MetricType.LATENCY) {
+            return new TopQueries(
+                clusterService.localNode(),
+                queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(true)
+            );
+        } else {
+            throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", request.getMetricType()));
+        }
+    }
+
+    /**
+     * Inner Node Top Queries Request
+     *
+     * @opensearch.internal
+     */
+    public static class NodeRequest extends TransportRequest {
+
+        final TopQueriesRequest request;
+
+        /**
+         * Create the NodeRequest object from StreamInput
+         *
+         * @param in the StreamInput to read the object
+         * @throws IOException IOException
+         */
+        public NodeRequest(StreamInput in) throws IOException {
+            super(in);
+            request = new TopQueriesRequest(in);
+        }
+
+        /**
+         * Create the NodeRequest object from a TopQueriesRequest
+         * @param request the TopQueriesRequest object
+         */
+        public NodeRequest(final TopQueriesRequest request) {
+            this.request = request;
+        }
+
+        @Override
+        public void writeTo(final StreamOutput out) throws IOException {
+            super.writeTo(out);
+            request.writeTo(out);
+        }
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java
new file mode 100644
index 0000000000000..54da0980deff8
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Transport Actions for Top N Queries.
+ */
+package org.opensearch.plugin.insights.rules.transport.top_queries;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
new file mode 100644
index 0000000000000..52cc1fbde790f
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
@@ -0,0 +1,116 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.settings;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.unit.TimeValue;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Settings for Query Insights Plugin
+ *
+ * @opensearch.api
+ * @opensearch.experimental
+ */
+public class QueryInsightsSettings {
+    /**
+     * Executors settings
+     */
+    public static final String QUERY_INSIGHTS_EXECUTOR = "query_insights_executor";
+    /**
+     * Max number of threads
+     */
+    public static final int MAX_THREAD_COUNT = 5;
+    /**
+     * Max number of requests for the consumer to collect at one time
+     */
+    public static final int QUERY_RECORD_QUEUE_CAPACITY = 1000;
+    /**
+     * Time interval for record queue consumer to run
+     */
+    public static final TimeValue QUERY_RECORD_QUEUE_DRAIN_INTERVAL = new TimeValue(5, TimeUnit.SECONDS);
+    /**
+     * Maximum window size
+     */
+    public static final TimeValue MAX_WINDOW_SIZE = new TimeValue(1, TimeUnit.DAYS);
+    /**
+     * Minimum window size
+     */
+    public static final TimeValue MIN_WINDOW_SIZE = new TimeValue(1, TimeUnit.MINUTES);
+    /**
+     * Valid window sizes
+     */
+    public static final Set<TimeValue> VALID_WINDOW_SIZES_IN_MINUTES = new HashSet<>(
+        Arrays.asList(
+            new TimeValue(1, TimeUnit.MINUTES),
+            new TimeValue(5, TimeUnit.MINUTES),
+            new TimeValue(10, TimeUnit.MINUTES),
+            new TimeValue(30, TimeUnit.MINUTES)
+        )
+    );
+
+    /** Max N size for top N queries */
+    public static final int MAX_N_SIZE = 100;
+    /** Default window size in seconds to keep the top N queries with latency data in query insight store */
+    public static final TimeValue DEFAULT_WINDOW_SIZE = new TimeValue(60, TimeUnit.SECONDS);
+    /** Default top N size to keep the data in query insight store */
+    public static final int DEFAULT_TOP_N_SIZE = 3;
+    /**
+     * Query Insights base uri
+     */
+    public static final String PLUGINS_BASE_URI = "/_insights";
+
+    /**
+     * Settings for Top Queries
+     *
+     */
+    public static final String TOP_QUERIES_BASE_URI = PLUGINS_BASE_URI + "/top_queries";
+    /** Default prefix for top N queries feature */
+    public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries";
+    /** Default prefix for top N queries by latency feature */
+    public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency";
+    /**
+     * Boolean setting for enabling top queries by latency.
+     */
+    public static final Setting<Boolean> TOP_N_LATENCY_QUERIES_ENABLED = Setting.boolSetting(
+        TOP_N_LATENCY_QUERIES_PREFIX + ".enabled",
+        false,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Int setting to define the top n size for top queries by latency.
+     */
+    public static final Setting<Integer> TOP_N_LATENCY_QUERIES_SIZE = Setting.intSetting(
+        TOP_N_LATENCY_QUERIES_PREFIX + ".top_n_size",
+        DEFAULT_TOP_N_SIZE,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Time setting to define the window size in seconds for top queries by latency.
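
Editor's note: the settings above are dynamic and node-scoped, so they can be updated at runtime once registered with `ClusterSettings` (the tests later in this patch register them the same way). A hedged sketch of that registration and a runtime-update consumer (OpenSearch classpath assumed; the demo class is illustrative):

```java
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;

// Registering a dynamic, node-scoped setting like the ones above and
// reacting to runtime updates through a settings-update consumer.
final class DynamicSettingDemo {
    static void register() {
        Setting<Boolean> enabled = Setting.boolSetting(
            "search.insights.top_queries.latency.enabled", // same key as TOP_N_LATENCY_QUERIES_ENABLED
            false,
            Setting.Property.Dynamic,
            Setting.Property.NodeScope
        );
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        clusterSettings.registerSetting(enabled);
        clusterSettings.addSettingsUpdateConsumer(enabled, value -> System.out.println("top-N latency enabled: " + value));
    }
}
```
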
+ */ + public static final Setting<TimeValue> TOP_N_LATENCY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".window_size", + DEFAULT_WINDOW_SIZE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Default constructor + */ + public QueryInsightsSettings() {} +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java new file mode 100644 index 0000000000000..f3152bbf966cb --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Settings for Query Insights Plugin + */ +package org.opensearch.plugin.insights.settings; diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java new file mode 100644 index 0000000000000..273b69e483e8c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.rest.RestHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class QueryInsightsPluginTests extends OpenSearchTestCase { + + private QueryInsightsPlugin queryInsightsPlugin; + + private final Client client = mock(Client.class); + private ClusterService clusterService; + private final ThreadPool threadPool = mock(ThreadPool.class); + + @Before + public void setup() { + queryInsightsPlugin = new QueryInsightsPlugin(); + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + + clusterService = new ClusterService(settings, clusterSettings, threadPool); + + } + + public void testGetSettings() { + assertEquals( + Arrays.asList( + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ), + queryInsightsPlugin.getSettings() + ); + } + + public void testCreateComponent() { + List<Object> components = (List<Object>) queryInsightsPlugin.createComponents( + client, + clusterService, + threadPool, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertEquals(2, components.size()); + assertTrue(components.get(0) instanceof QueryInsightsService); + assertTrue(components.get(1) instanceof QueryInsightsListener); + } + + public void testGetExecutorBuilders() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + List<ExecutorBuilder<?>> executorBuilders = queryInsightsPlugin.getExecutorBuilders(settings); + assertEquals(1, executorBuilders.size()); + assertTrue(executorBuilders.get(0) instanceof ScalingExecutorBuilder); + } + + public void testGetRestHandlers() { + List<RestHandler> components = queryInsightsPlugin.getRestHandlers(Settings.EMPTY, null, null, null, null, null, null); + assertEquals(1, components.size()); + assertTrue(components.get(0) instanceof RestTopQueriesAction); + } + + public void testGetActions() { + List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> components = queryInsightsPlugin.getActions(); + assertEquals(1, components.size()); + assertTrue(components.get(0).getAction() instanceof TopQueriesAction); + } + +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java new file mode 100644 index 0000000000000..870ef5b9c8be9 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java @@ -0,0 +1,189 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.Maps; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.OpenSearchTestCase.buildNewFakeTransportAddress; +import static org.opensearch.test.OpenSearchTestCase.random; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween; +import static org.opensearch.test.OpenSearchTestCase.randomArray; +import static org.opensearch.test.OpenSearchTestCase.randomDouble; +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; +import static org.opensearch.test.OpenSearchTestCase.randomLong; +import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +final public class QueryInsightsTestUtils { + + public QueryInsightsTestUtils() {} + + public static List<SearchQueryRecord> generateQueryInsightRecords(int count) { + return generateQueryInsightRecords(count, count, System.currentTimeMillis(), 0); + } + + /** + * Creates a List of random Query Insight Records for testing purpose + */ + public static List<SearchQueryRecord> generateQueryInsightRecords(int lower, int upper, long startTimeStamp, long interval) { + List<SearchQueryRecord> records = new ArrayList<>(); + int countOfRecords = randomIntBetween(lower, upper); + long timestamp = startTimeStamp; + for (int i = 0; i < countOfRecords; ++i) { + Map<MetricType, Number> measurements = Map.of( + MetricType.LATENCY, + randomLongBetween(1000, 10000), + MetricType.CPU, + randomDouble(), + MetricType.JVM, + randomDouble() + ); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + int countOfPhases = randomIntBetween(2, 5); + for (int j = 0; j < countOfPhases; ++j) { + phaseLatencyMap.put(randomAlphaOfLengthBetween(5, 10), randomLong()); + } + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + attributes.put(Attribute.SOURCE, "{\"size\":20}"); + attributes.put(Attribute.TOTAL_SHARDS, randomIntBetween(1, 100)); + attributes.put(Attribute.INDICES, randomArray(1, 3, Object[]::new, () -> randomAlphaOfLengthBetween(5, 10))); + attributes.put(Attribute.PHASE_LATENCY_MAP, phaseLatencyMap); + + records.add(new SearchQueryRecord(timestamp, measurements, attributes)); + timestamp += interval; + } + return records; + } + + public static TopQueries createRandomTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + 
VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = generateQueryInsightRecords(10); + + return new TopQueries(node, records); + } + + public static TopQueries createFixedTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = new ArrayList<>(); + records.add(createFixedSearchQueryRecord()); + + return new TopQueries(node, records); + } + + public static SearchQueryRecord createFixedSearchQueryRecord() { + long timestamp = 1706574180000L; + Map<MetricType, Number> measurements = Map.of(MetricType.LATENCY, 1L); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + + return new SearchQueryRecord(timestamp, measurements, attributes); + } + + public static void compareJson(ToXContent param1, ToXContent param2) throws IOException { + if (param1 == null || param2 == null) { + assertNull(param1); + assertNull(param2); + return; + } + + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + + XContentBuilder param2Builder = jsonBuilder(); + param2.toXContent(param2Builder, params); + + assertEquals(param1Builder.toString(), param2Builder.toString()); + } + + @SuppressWarnings("unchecked") + public static boolean checkRecordsEquals(List<SearchQueryRecord> records1, List<SearchQueryRecord> records2) { + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!records1.get(i).equals(records2.get(i))) { + return false; + } + Map<Attribute, Object> attributes1 = records1.get(i).getAttributes(); + Map<Attribute, Object> attributes2 = records2.get(i).getAttributes(); + for (Map.Entry<Attribute, Object> entry : attributes1.entrySet()) { + Attribute attribute = entry.getKey(); + Object value = entry.getValue(); + if (!attributes2.containsKey(attribute)) { + return false; + } + if (value instanceof Object[] && !Arrays.deepEquals((Object[]) value, (Object[]) attributes2.get(attribute))) { + return false; + } else if (value instanceof Map + && !Maps.deepEquals((Map<Object, Object>) value, (Map<Object, Object>) attributes2.get(attribute))) { + return false; + } + } + } + return true; + } + + public static boolean checkRecordsEqualsWithoutOrder( + List<SearchQueryRecord> records1, + List<SearchQueryRecord> records2, + MetricType metricType + ) { + Set<SearchQueryRecord> set2 = new TreeSet<>((a, b) -> SearchQueryRecord.compare(a, b, metricType)); + set2.addAll(records2); + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!set2.contains(records1.get(i))) { + return false; + } + } + return true; + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java new file mode 100644 index 0000000000000..f340950017a5c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions 
made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.core.service.TopQueriesService; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Unit Tests for {@link QueryInsightsListener}. + */ +public class QueryInsightsListenerTests extends OpenSearchTestCase { + private final SearchRequestContext searchRequestContext = mock(SearchRequestContext.class); + private final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class); + private final SearchRequest searchRequest = mock(SearchRequest.class); + private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class); + private final TopQueriesService topQueriesService = mock(TopQueriesService.class); + private ClusterService clusterService; + + @Before + public void setup() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + clusterService = new ClusterService(settings, clusterSettings, null); + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); + } + + public void testOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 
10; + + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext); + + verify(queryInsightsService, times(1)).addRecord(any()); + } + + public void testConcurrentOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 10; + + final List<QueryInsightsListener> searchListenersList = new ArrayList<>(); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + int numRequests = 50; + Thread[] threads = new Thread[numRequests]; + Phaser phaser = new Phaser(numRequests + 1); + CountDownLatch countDownLatch = new CountDownLatch(numRequests); + + for (int i = 0; i < numRequests; i++) { + searchListenersList.add(new QueryInsightsListener(clusterService, queryInsightsService)); + } + + for (int i = 0; i < numRequests; i++) { + int finalI = i; + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + QueryInsightsListener thisListener = searchListenersList.get(finalI); + thisListener.onRequestEnd(searchPhaseContext, searchRequestContext); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + verify(queryInsightsService, times(numRequests)).addRecord(any()); + } + + public void testSetEnabled() { + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, true); + assertTrue(queryInsightsListener.isEnabled()); + + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.JVM)).thenReturn(false); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false); + assertFalse(queryInsightsListener.isEnabled()); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java new file mode 100644 index 0000000000000..c29b48b9690d1 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import static org.mockito.Mockito.mock; + +/** + * Unit Tests for {@link QueryInsightsService}. + */ +public class QueryInsightsServiceTests extends OpenSearchTestCase { + private final ThreadPool threadPool = mock(ThreadPool.class); + private QueryInsightsService queryInsightsService; + + @Before + public void setup() { + queryInsightsService = new QueryInsightsService(threadPool); + queryInsightsService.enableCollection(MetricType.LATENCY, true); + queryInsightsService.enableCollection(MetricType.CPU, true); + queryInsightsService.enableCollection(MetricType.JVM, true); + } + + public void testAddRecordToLimitAndDrain() { + SearchQueryRecord record = QueryInsightsTestUtils.generateQueryInsightRecords(1, 1, System.currentTimeMillis(), 0).get(0); + for (int i = 0; i < QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY; i++) { + assertTrue(queryInsightsService.addRecord(record)); + } + // exceed capacity + assertFalse(queryInsightsService.addRecord(record)); + queryInsightsService.drainRecords(); + assertEquals( + QueryInsightsSettings.DEFAULT_TOP_N_SIZE, + queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size() + ); + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java new file mode 100644 index 0000000000000..060df84a89485 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.cluster.coordination.DeterministicTaskQueue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Unit Tests for {@link TopQueriesService}. 
+ */ +public class TopQueriesServiceTests extends OpenSearchTestCase { + private TopQueriesService topQueriesService; + + @Before + public void setup() { + topQueriesService = new TopQueriesService(MetricType.LATENCY); + topQueriesService.setTopNSize(Integer.MAX_VALUE); + topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE)); + topQueriesService.setEnabled(true); + } + + public void testIngestQueryDataWithLargeWindow() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.consumeRecords(records); + assertTrue( + QueryInsightsTestUtils.checkRecordsEqualsWithoutOrder( + topQueriesService.getTopQueriesRecords(false), + records, + MetricType.LATENCY + ) + ); + } + + public void testRollingWindows() { + List<SearchQueryRecord> records; + // Create 5 records at Now - 10 minutes to make sure they belong to the last window + records = QueryInsightsTestUtils.generateQueryInsightRecords(5, 5, System.currentTimeMillis() - 1000 * 60 * 10, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(0, topQueriesService.getTopQueriesRecords(true).size()); + + // Create 10 records at now + 1 minute, to make sure they belong to the current window + records = QueryInsightsTestUtils.generateQueryInsightRecords(10, 10, System.currentTimeMillis() + 1000 * 60, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(10, topQueriesService.getTopQueriesRecords(true).size()); + } + + public void testSmallNSize() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.setTopNSize(1); + topQueriesService.consumeRecords(records); + assertEquals(1, topQueriesService.getTopQueriesRecords(false).size()); + } + + public void testValidateTopNSize() { + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); + } + + public void testGetTopQueriesWhenNotEnabled() { + topQueriesService.setEnabled(false); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); + } + + public void testValidateWindowSize() { + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MAX_WINDOW_SIZE.getSeconds() + 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MIN_WINDOW_SIZE.getSeconds() - 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(2, TimeUnit.DAYS)); }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(7, TimeUnit.MINUTES)); }); + } + + private static void runUntilTimeoutOrFinish(DeterministicTaskQueue deterministicTaskQueue, long duration) { + final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + duration; + while (deterministicTaskQueue.getCurrentTimeMillis() < endTime + && (deterministicTaskQueue.hasRunnableTasks() || deterministicTaskQueue.hasDeferredTasks())) { + if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { + deterministicTaskQueue.advanceTime(); + } else if (deterministicTaskQueue.hasRunnableTasks()) { + deterministicTaskQueue.runRandomTask(); + } + } + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java new file mode 100644 index 0000000000000..619fd4b33a3dc --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Granular tests for the {@link TopQueriesRequest} class. + */ +public class TopQueriesRequestTests extends OpenSearchTestCase { + + /** + * Check that we can set the metric type + */ + public void testSetMetricType() throws Exception { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY, randomAlphaOfLength(5)); + TopQueriesRequest deserializedRequest = roundTripRequest(request); + assertEquals(request.getMetricType(), deserializedRequest.getMetricType()); + } + + /** + * Serialize and deserialize a request. + * @param request A request to serialize. + * @return The deserialized, "round-tripped" request. + */ + private static TopQueriesRequest roundTripRequest(TopQueriesRequest request) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesRequest(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java new file mode 100644 index 0000000000000..eeee50d3da703 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Granular tests for the {@link TopQueriesResponse} class. 
+ */ +public class TopQueriesResponseTests extends OpenSearchTestCase { + + /** + * Check serialization and deserialization + */ + public void testSerialize() throws Exception { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + TopQueriesResponse deserializedResponse = roundTripResponse(response); + assertEquals(response.toString(), deserializedResponse.toString()); + } + + public void testToXContent() throws IOException { + char[] expectedXcontent = + "{\"top_queries\":[{\"timestamp\":1706574180000,\"node_id\":\"node_for_top_queries_test\",\"search_type\":\"query_then_fetch\",\"latency\":1}]}" + .toCharArray(); + TopQueries topQueries = QueryInsightsTestUtils.createFixedTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + char[] xContent = BytesReference.bytes(response.toXContent(builder, ToXContent.EMPTY_PARAMS)).utf8ToString().toCharArray(); + Arrays.sort(expectedXcontent); + Arrays.sort(xContent); + + assertEquals(Arrays.hashCode(expectedXcontent), Arrays.hashCode(xContent)); + } + + /** + * Serialize and deserialize a TopQueriesResponse. + * @param response A response to serialize. + * @return The deserialized, "round-tripped" response. + */ + private static TopQueriesResponse roundTripResponse(TopQueriesResponse response) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesResponse(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java new file mode 100644 index 0000000000000..7db08b53ad1df --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +/** + * Tests for {@link TopQueries}. 
+ */ +public class TopQueriesTests extends OpenSearchTestCase { + + public void testTopQueries() throws IOException { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + topQueries.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + TopQueries readTopQueries = new TopQueries(in); + assertTrue( + QueryInsightsTestUtils.checkRecordsEquals(topQueries.getTopQueriesRecord(), readTopQueries.getTopQueriesRecord()) + ); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java new file mode 100644 index 0000000000000..793d5878e2300 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Granular tests for the {@link SearchQueryRecord} class. + */ +public class SearchQueryRecordTests extends OpenSearchTestCase { + + /** + * Check that the serialization, deserialization, and equals functions work as expected + */ + public void testSerializationAndEquals() throws Exception { + List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + List<SearchQueryRecord> copiedRecords = new ArrayList<>(); + for (SearchQueryRecord record : records) { + copiedRecords.add(roundTripRecord(record)); + } + assertTrue(QueryInsightsTestUtils.checkRecordsEquals(records, copiedRecords)); + + } + + public void testAllMetricTypes() { + Set<MetricType> allMetrics = MetricType.allMetricTypes(); + Set<MetricType> expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.JVM)); + assertEquals(expected, allMetrics); + } + + public void testCompare() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(0, SearchQueryRecord.compare(record1, record2, MetricType.LATENCY)); + } + + public void testEqual() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(record1, record2); + } + + /** + * Serialize and deserialize a SearchQueryRecord. + * @param record A SearchQueryRecord to serialize. + * @return The deserialized, "round-tripped" record. 
+ */ + private static SearchQueryRecord roundTripRecord(SearchQueryRecord record) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + record.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new SearchQueryRecord(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java new file mode 100644 index 0000000000000..ac19fa2a7348f --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction.ALLOWED_METRICS; + +public class RestTopQueriesActionTests extends OpenSearchTestCase { + + public void testEmptyNodeIdsValidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertEquals(0, actual.nodesIds().length); + } + + public void testNodeIdsValid() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + String[] nodes = randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(5, 10)); + params.put("nodeId", String.join(",", nodes)); + + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertArrayEquals(nodes, actual.nodesIds()); + } + + public void testInvalidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomAlphaOfLengthBetween(5, 10).toUpperCase(Locale.ROOT)); + + RestRequest restRequest = buildRestRequest(params); + Exception exception = assertThrows(IllegalArgumentException.class, () -> { RestTopQueriesAction.prepareRequest(restRequest); }); + assertEquals( + String.format(Locale.ROOT, "request [/_insights/top_queries] contains invalid metric type [%s]", params.get("type")), + exception.getMessage() + ); + } + + public void testGetRoutes() { + RestTopQueriesAction action = new RestTopQueriesAction(); + List<RestHandler.Route> routes = action.routes(); + assertEquals(2, routes.size()); + assertEquals("query_insights_top_queries_action", action.getName()); + } + + private FakeRestRequest buildRestRequest(Map<String, String> params) { + return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_insights/top_queries") + .withParams(params) + .build(); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java new file mode 100644 index 0000000000000..a5f36b6e8cce0 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.transport.top_queries; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class TransportTopQueriesActionTests extends OpenSearchTestCase { + + private final ThreadPool threadPool = mock(ThreadPool.class); + + private final Settings.Builder settingsBuilder = Settings.builder(); + private final Settings settings = settingsBuilder.build(); + private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + private final TransportService transportService = mock(TransportService.class); + private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class); + private final ActionFilters actionFilters = mock(ActionFilters.class); + private final TransportTopQueriesAction transportTopQueriesAction = new TransportTopQueriesAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + private final DummyParentAction dummyParentAction = new DummyParentAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + + class DummyParentAction extends TransportTopQueriesAction { + public DummyParentAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + QueryInsightsService topQueriesByLatencyService, + ActionFilters actionFilters + ) { + super(threadPool, clusterService, transportService, topQueriesByLatencyService, actionFilters); + } + + public TopQueriesResponse createNewResponse() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + return newResponse(request, List.of(), List.of()); + } + } + + @Before + public void setup() { + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + } + + public void testNewResponse() { + TopQueriesResponse response = dummyParentAction.createNewResponse(); + assertNotNull(response); + } + +} diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 51f2057b4bedb..31db767b2c68e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.39.0' + api 'com.azure:azure-core:1.47.0' api 'com.azure:azure-json:1.0.1' api 'com.azure:azure-storage-common:12.21.2' api 'com.azure:azure-core-http-netty:1.12.8' @@ -64,7 +64,7 @@ dependencies { api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" - api 'org.codehaus.woodstox:stax2-api:4.2.1' + api 'org.codehaus.woodstox:stax2-api:4.2.2' implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly "com.google.guava:guava:${versions.guava}" api "org.apache.commons:commons-lang3:${versions.commonslang}" diff --git a/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 deleted file mode 100644 index c91498a464b3d..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39765fb88a90174628b31ddf6ff9f8d63462e080 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 new file mode 100644 index 0000000000000..42e35aacc63b1 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 @@ -0,0 +1 @@ +6b300175826f0bb0916fca2fa5f70885b716e93f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 deleted file mode 100644 index ad4e055d4f19a..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d952ad30d3f2d1220f39db175618414b56d14638 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..9dea3dfc55691 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 @@ -0,0 +1 @@ +fbe3c274a39cef5538ca8688ac7e2ad0053a6ffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 deleted file mode 100644 index 4309dad93b2b6..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36a418325c618e440e5ccb80b75c705d894f50bd \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..fe8e51b8e0869 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 @@ -0,0 +1 @@ +3fab507bba9d477e52ed2302dc3ddbd23cbae339 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 deleted file mode 100644 index 5f54d0ac554e0..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e9df364a2695e66eb8d2803d6725424842760125 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..3954ac9c39af3 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +e07032ce170277213ac4835169ca79fa0340c7b5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 0232fc58f9357..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 deleted file mode 100644 index 3b6cd3524d978..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b126ceba61275f38297cacd5ea0cd6d3addee04 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..114d77a1bb95f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9d01e814971f2..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50a2d899a8f8a68daed1a9b6d7750184310cc45f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5a4bde479eb38 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 987b524aedc98..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 
new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 deleted file mode 100644 index 5eaf96739ed72..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 deleted file mode 100644 index 091125169c696..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 986720ec431fe..1ba16422c9214 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -39,6 +39,8 @@ import com.azure.storage.common.implementation.Constants; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; @@ -188,6 +190,7 @@ protected String requestUniqueId(final HttpExchange exchange) { @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHandler { + private static final Logger testLogger = LogManager.getLogger(AzureHTTPStatsCollectorHandler.class); private static final Pattern listPattern = Pattern.compile("GET /[a-zA-Z0-9]+\\??.+"); private static final Pattern getPattern = Pattern.compile("GET /[^?/]+/[^?/]+\\??.*"); @@ -197,6 +200,7 @@ private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { @Override protected void maybeTrack(String request, Headers headers) { + testLogger.info(request, headers); if (getPattern.matcher(request).matches()) { trackRequest("GetBlob"); } else if (Regex.simpleMatch("HEAD /*/*", request)) { diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index b60701ba5e533..74edd4f3eb23c 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -389,6 +389,7 @@ private static class NioThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; + @SuppressWarnings("removal") NioThreadFactory() { SecurityManager s = System.getSecurityManager(); group = (s != null) ? 
s.getThreadGroup() : Thread.currentThread().getThreadGroup(); diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java index 0fbe9797f726f..a206c3b883870 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java @@ -49,6 +49,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index af6bf38cb065c..0ddcf0f6dddca 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -54,19 +54,19 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:2.35.0' - api 'com.google.api:gax-httpjson:2.39.0' + api 'com.google.api:gax-httpjson:2.42.0' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.25.1' + api 'com.google.api.grpc:proto-google-common-protos:2.33.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" - api 'com.google.cloud:google-cloud-core:2.5.10' + api 'com.google.cloud:google-cloud-core:2.30.0' api 'com.google.cloud:google-cloud-core-http:2.23.0' api 'com.google.cloud:google-cloud-storage:1.113.1' @@ -78,7 +78,7 @@ dependencies { api 'com.google.http-client:google-http-client:1.43.3' api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.43.3' + api 'com.google.http-client:google-http-client-jackson2:1.44.1' api 'com.google.oauth-client:google-oauth-client:1.34.1' @@ -206,7 +206,10 @@ thirdPartyAudit { // commons-logging provided dependencies 'javax.jms.Message', 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' + 'javax.servlet.ServletContextListener', + // Bump for gax 2.42.0 + 'com.google.api.gax.rpc.EndpointContext', + 'com.google.api.gax.rpc.RequestMutator' ) } diff --git a/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 deleted file mode 100644 index d0f3b8dfa6d25..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebff086f21c54c1eb02056f94255b4f1836a8efe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 new file mode 100644 index 0000000000000..672506572ed4d --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 @@ -0,0 +1 @@ +4db06bc31c2fb34b0490362e8666c20fdc1fb3f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 new file mode 100644 index 0000000000000..10f8f90df108f --- /dev/null +++ 
b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 @@ -0,0 +1 @@ +b48ea27cbdccd5f225d8a35ea28e2cd01c25918b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 deleted file mode 100644 index 34c3dc6805500..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d979bfe28551eb78cddae9282833ede147a9331 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 deleted file mode 100644 index 8380b9fb770b5..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689da86469d19a01c726c8c24477b95c8a834bbe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 deleted file mode 100644 index cd065dabb8e8a..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb90049537b621e39610a110c58ce0b914ee3cc5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 new file mode 100644 index 0000000000000..746e4e99fd881 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 @@ -0,0 +1 @@ +644e11df1cec6d38a63a9a06a701e48c398b87d0 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index e15b37f209c5f..620f8e98d5f20 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -106,6 +106,7 @@ class GoogleCloudStorageRetryingInputStream extends InputStream { currentStream = openStream(); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "need access to storage client") private static com.google.api.services.storage.Storage getStorage(Storage client) { return AccessController.doPrivileged((PrivilegedAction<com.google.api.services.storage.Storage>) () -> { diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 35127d6ea4060..f8c451749480b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -48,6 +48,7 @@ * needs {@link SocketPermission} 'connect' to establish 
connections. This class wraps the operations requiring access * in {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9110503f67304..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d75246285e5fac6f6dad47e387ed4f46f36e521d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..0e3595fecb0d2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java index 119d060374be2..af49cd3c579e6 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java @@ -52,6 +52,7 @@ import java.util.Collections; import java.util.Map; +@SuppressWarnings("removal") public final class HdfsPlugin extends Plugin implements RepositoryPlugin { // initialize some problematic classes with elevated privileges diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index f0ffec5713c1d..4b38e62b2525a 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -254,6 +254,7 @@ private static String getHostName() { } } + @SuppressWarnings("removal") @Override protected HdfsBlobStore createBlobStore() { // initialize our blobstore using elevated privileges. 
diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 07d1d29eecfc4..5a27eb937ff9c 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -57,6 +57,7 @@ * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. */ +@SuppressWarnings("removal") class HdfsSecurityContext { private static final Permission[] SIMPLE_AUTH_PERMISSIONS; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index d0b63f17e3887..89ba8d51cf7f7 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -62,6 +62,7 @@ /** * Integration test that runs against an HA-Enabled HDFS instance */ +@SuppressWarnings("removal") public class HaHdfsFailoverTestSuiteIT extends OpenSearchRestTestCase { public void testHAFailoverWithRepository() throws Exception { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 3a6eb0e205ccb..5f7454df4ecfc 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -64,6 +64,7 @@ @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsBlobStoreContainerTests extends OpenSearchTestCase { + @SuppressWarnings("removal") private FileContext createTestContext() { FileContext fileContext; try { diff --git a/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 deleted file mode 100644 index 5a626eeb5725b..0000000000000 --- a/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 deleted file mode 100644 index 3ee96bb6e4076..0000000000000 --- a/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ 
b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 deleted file mode 100644 index 010464bdf9fd1..0000000000000 --- a/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 deleted file mode 100644 index 4b4ee1db864a8..0000000000000 --- a/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 deleted file mode 100644 index 45a88305c1928..0000000000000 --- a/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 deleted file mode 100644 index ba5f43378730c..0000000000000 --- a/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 deleted file mode 100644 index fc65ee07c40c6..0000000000000 --- a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a3b5f607ece38536f17d869b82c669c6339f9ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 new file mode 
100644 index 0000000000000..50940d73f4f7b --- /dev/null +++ b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +b78a1182a9cf3cccf416cc5a441d08174b08682d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 deleted file mode 100644 index 5bc0e31166c77..0000000000000 --- a/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 deleted file mode 100644 index 523cf43dcb2e9..0000000000000 --- a/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 deleted file mode 100644 index cbc65687606fc..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..66bf7ed6ecce8 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 @@ -0,0 +1 @@ +880a742337010da4c851f843d8cac150e22dff9f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 deleted file mode 100644 index d231db4fd49fc..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 new file mode 100644 index 0000000000000..c0e4bb0c56849 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 @@ -0,0 +1 @@ +7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 deleted file mode 100644 index a19b00e62f8b5..0000000000000 --- a/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 deleted file mode 100644 index db6701d87892a..0000000000000 --- a/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a0eae705b27465516f3b09cc9918e40963d534d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index 
f0242709f34f7..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 deleted file mode 100644 index 489f18e0bceaa..0000000000000 --- a/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c269571ad2fb19851ebd7c7856aa2975fe0bab3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..4ae8b2ec5a23c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +29195a65eeea36cf1960d1939bca6586d5842dad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 
@@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 deleted file mode 100644 index 522d85a3bf12e..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -700fdbabab44709b0eccffe8f91c4226a5787356 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..6b9a35acb2c20 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 deleted file mode 100644 index b7104cf0939e6..0000000000000 --- a/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 deleted file mode 100644 index 4dee45f4d9dd3..0000000000000 --- 
a/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 deleted file mode 100644 index 993fc2f97de62..0000000000000 --- a/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 deleted file mode 100644 index b7f3157995aa6..0000000000000 --- a/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69e7df4c7c170867dc246c0205c5e0b6099e8a6f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7125793759db5 --- /dev/null +++ b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 @@ -0,0 +1 @@ +6a37f591abd11a3f848f091f1724825741daaeb2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 deleted file mode 100644 index 5f12be9c08c5b..0000000000000 --- a/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 deleted file mode 100644 index ec53fa0db623e..0000000000000 --- a/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a44e55775ae429931287f81a634eeb67bd607a9f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..cb73b19e14fcf --- /dev/null +++ b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 @@ -0,0 +1 @@ +52b92753b944d3e1b8c6814bc9d6c93119ca6421 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ 
-Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 deleted file mode 100644 index 9f4bbdd0f22ad..0000000000000 --- a/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc350996b6f8481a32c8e73598138fc32826584 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..1f40b6dcd8417 --- /dev/null +++ b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7415d850a4aea10935f84766065dd76a3d327a54 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 deleted file mode 100644 index e7eebbb98f1fe..0000000000000 --- a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 deleted file mode 100644 index fc4cde604e33c..0000000000000 --- a/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 3a55fcb0bdbcd..25f361b40636e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -514,7 +514,7 @@ private static List<ListObjectsV2Response> executeListing( for (ListObjectsV2Response listObjectsV2Response : listObjectsIterable) { results.add(listObjectsV2Response); totalObjects += listObjectsV2Response.contents().size(); - if (limit != -1 && totalObjects > limit) { + if (limit != -1 && totalObjects >= limit) { break; } } diff --git 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 4fda0ee95a3ec..e44f408e6dd12 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -201,6 +201,12 @@ final class S3ClientSettings { key -> Setting.intSetting(key, 500, Property.NodeScope) ); + static final Setting.AffixSetting<Integer> MAX_SYNC_CONNECTIONS_SETTING = Setting.affixKeySetting( + PREFIX, + "max_sync_connections", + key -> Setting.intSetting(key, 500, Property.NodeScope) + ); + /** Connection acquisition timeout for new connections to S3. */ static final Setting.AffixSetting<TimeValue> CONNECTION_ACQUISITION_TIMEOUT = Setting.affixKeySetting( PREFIX, @@ -284,10 +290,13 @@ final class S3ClientSettings { /** The connection TTL for the s3 client */ final int connectionTTLMillis; - /** The max number of connections for the s3 client */ + /** The max number of connections for the s3 async client */ final int maxConnections; - /** The connnection acquisition timeout for the s3 async client */ + /** The max number of connections for the s3 sync client */ + final int maxSyncConnections; + + /** The connection acquisition timeout for the s3 sync and async client */ final int connectionAcquisitionTimeoutMillis; /** The number of retries to use for the s3 client. */ @@ -318,6 +327,7 @@ private S3ClientSettings( int connectionTimeoutMillis, int connectionTTLMillis, int maxConnections, + int maxSyncConnections, int connectionAcquisitionTimeoutMillis, int maxRetries, boolean throttleRetries, @@ -336,6 +346,7 @@ private S3ClientSettings( this.connectionTimeoutMillis = connectionTimeoutMillis; this.connectionTTLMillis = connectionTTLMillis; this.maxConnections = maxConnections; + this.maxSyncConnections = maxSyncConnections; this.connectionAcquisitionTimeoutMillis = connectionAcquisitionTimeoutMillis; this.maxRetries = maxRetries; this.throttleRetries = throttleRetries; @@ -386,6 +397,9 @@ S3ClientSettings refine(Settings repositorySettings) { ).millis() ); final int newMaxConnections = Math.toIntExact(getRepoSettingOrDefault(MAX_CONNECTIONS_SETTING, normalizedSettings, maxConnections)); + final int newMaxSyncConnections = Math.toIntExact( + getRepoSettingOrDefault(MAX_SYNC_CONNECTIONS_SETTING, normalizedSettings, maxSyncConnections) + ); final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean newPathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); @@ -433,6 +447,7 @@ S3ClientSettings refine(Settings repositorySettings) { newConnectionTimeoutMillis, newConnectionTTLMillis, newMaxConnections, + newMaxSyncConnections, newConnectionAcquisitionTimeoutMillis, newMaxRetries, newThrottleRetries, @@ -563,6 +578,7 @@ static S3ClientSettings getClientSettings(final Settings settings, final String Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TIMEOUT_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TTL_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, MAX_CONNECTIONS_SETTING)), + Math.toIntExact(getConfigValue(settings, clientName, MAX_SYNC_CONNECTIONS_SETTING)),
Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_ACQUISITION_TIMEOUT).millis()), getConfigValue(settings, clientName, MAX_RETRIES_SETTING), getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING), diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 24387fb98a425..fe81da31432f4 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -279,6 +279,8 @@ protected PasswordAuthentication getPasswordAuthentication() { } clientBuilder.socketTimeout(Duration.ofMillis(clientSettings.readTimeoutMillis)); + clientBuilder.maxConnections(clientSettings.maxSyncConnections); + clientBuilder.connectionAcquisitionTimeout(Duration.ofMillis(clientSettings.connectionAcquisitionTimeoutMillis)); return clientBuilder; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java index 4888764dbc720..f88aa46e61806 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 8e1926d40302f..f84d953baae8e 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -75,6 +75,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("removal") @SuppressForbidden(reason = "test requires to set a System property to allow insecure settings when running in IDE") public class RepositoryCredentialsTests extends OpenSearchSingleNodeTestCase implements ConfigPathSupport { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 58ad290a31e85..2b45e9cfe2d4b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -916,6 +916,15 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitLessThanPageSize() testListBlobsByPrefixInLexicographicOrder(2, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } + /** + * Test the boundary value at the page size to ensure that + * no unnecessary call is made to S3 to fetch the next page.
+ * @throws IOException + */ + public void testListBlobsByPrefixInLexicographicOrderWithLimitEqualToPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(5, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSize() throws IOException { testListBlobsByPrefixInLexicographicOrder(8, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index f27c8387b6e45..b47749553aeba 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -74,6 +74,8 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.connectionTimeoutMillis, is(10 * 1000)); assertThat(defaultSettings.connectionTTLMillis, is(5 * 1000)); assertThat(defaultSettings.maxConnections, is(500)); + assertThat(defaultSettings.maxSyncConnections, is(500)); + assertThat(defaultSettings.connectionAcquisitionTimeoutMillis, is(15 * 60 * 1000)); assertThat(defaultSettings.maxRetries, is(3)); assertThat(defaultSettings.throttleRetries, is(true)); } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 9be83e30c3183..735cbd92b691a 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -16,7 +16,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Opentelemetry based telemetry implementation.' classname 'org.opensearch.telemetry.OTelTelemetryPlugin' - hasClientJar = true + hasClientJar = false } dependencies { diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 deleted file mode 100644 index 19f734ca17b79..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4aea155f6d6b1032eba85378564431cfd86f562 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..b577500d71e1d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 @@ -0,0 +1 @@ +59470f4aa3a9207f21936461b8fdcb36d46455ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 deleted file mode 100644 index 4c06d28cba199..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fcc87f3d810ce49d865ee54b40831559c5e129b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d3156577248d5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8850bc4c65d0fd22ff987b4683206ec4e69f2689 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 deleted file mode 100644 index 91a5c0f715d2b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19c9a3f52851a1333b648ed83c82d16eb4c64afd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..f176b21d12dc4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8d1cb823ab18fa871a1549e7c522bf28f2b3d8fe \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 deleted file mode 100644 index 6c05600ae3b08..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3e74d5b8cf5e60d9965042fa284085bbe081ce3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..cd25e0ab9f294 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 @@ -0,0 +1 @@ +bc045cae89ff6f18071760f6e4659dd880e88a1b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 deleted file mode 100644 index f54e6f6893050..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af68f90f0410b7b3a1900d3e0a15ad51b10ffd5b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..fabb394f9c2e0 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +5ee49902ba884d6c3e48499a9311a624396d9630 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 deleted file mode 100644 index 49d40b36ba85b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4acab18052267e280d1f9de22c591a5c88bed3a6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..378ba4d43dcd1 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +2706e3b883d2bcd1a6b3e0bb4118ffbd7820550b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 deleted file mode 100644 index a01de2aa84c43..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f07e1764389e076a36fb7d9e5769e29f3dab950 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..a3d7e15e1a624 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +dcc924787b559278697b74dbc5bb6d046b236ef6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 deleted file mode 100644 index a5fc8c2059104..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9201e6a43a0a89515626f7516c7d1b2c349f76df \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..71ab3e184db9e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 @@ -0,0 +1 @@ +d58f7c669e371f6ff61b705770af9a3c1f31df52 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 deleted file mode 100644 index cd746f0756e46..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab49eb621d6d01f0ad2f016989d0352ef18ea9a2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c9a75d1b4350a --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4056d1b562b4da7720817d8af15d1d3ccdf4b776 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 deleted file mode 100644 index 740737dc13efc..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01fcd8bad38d7b8987f6fc93bd7e933240eb727e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c31584f59c0d8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +11d6f8c7b029efcb5c6c449cadef155b781afb78 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 deleted file mode 100644 index e6ff3dbafda22..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abad9abc80dfe6118a60413afa161696bbf8dd43 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..a134bb06ec635 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 @@ -0,0 +1 @@ +98e94479db1e68c4779efc44bf6b4fca83e98b54 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 deleted file mode 100644 index 36ec960c4f7be..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d88407ae475e5f4e859a81e4f61e362e939f7bc2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d146241f52f29 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4f8f5d30c3eeede7b2260d979d9f403cfa381c3d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 deleted file mode 100644 index 293b82f206c99..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -121a75c2ba9ed8b80f5ff131c2411a5d460f38d0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..802761e38846c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 @@ -0,0 +1 @@ +e3068cbaedfac6a28c6483923982b2efb861d3f4 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java index bcdcb657c4f42..e77e69d121036 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -15,6 +15,7 @@ import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.test.OpenSearchIntegTestCase; @@ -53,10 +54,13 @@ public void testSanityChecksWhenMetricsDisabled() throws Exception { Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); counter.add(1.0); + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "1"); + Thread.sleep(2000); assertTrue(metricsRegistry instanceof NoopMetricsRegistry); assertTrue(counter instanceof NoopCounter); + assertTrue(histogram instanceof NoopHistogram); } } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java index 
ed341595d327d..90143d907cd99 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -14,15 +14,22 @@ import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; +import java.io.Closeable; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; import java.util.stream.Collectors; import io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramPointData; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { @@ -92,6 +99,66 @@ public void testUpDownCounter() throws Exception { assertEquals(-1.0, value, 0.0); } + public void testHistogram() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "ms"); + histogram.record(2.0); + histogram.record(1.0); + histogram.record(3.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + ImmutableExponentialHistogramPointData histogramPointData = ((ImmutableExponentialHistogramPointData) ((ArrayList) exporter + .getFinishedMetricItems() + .stream() + .filter(a -> a.getName().contains("test-histogram")) + .collect(Collectors.toList()) + .get(0) + .getExponentialHistogramData() + .getPoints()).get(0)); + assertEquals(6.0, histogramPointData.getSum(), 0.0); + assertEquals(3.0, histogramPointData.getMax(), 0.0); + assertEquals(1.0, histogramPointData.getMin(), 0.0); + } + + public void testGauge() throws Exception { + String metricName = "test-gauge"; + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + Tags tags = Tags.create().addTag("test", "integ-test"); + final AtomicInteger testValue = new AtomicInteger(0); + Supplier<Double> valueProvider = () -> { return Double.valueOf(testValue.incrementAndGet()); }; + Closeable gaugeCloseable = metricsRegistry.createGauge(metricName, "test", "ms", valueProvider, tags); + // Sleep for about 2.2s to wait for metrics to be published. + Thread.sleep(2200); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + + assertTrue(getMaxObservableGaugeValue(exporter, metricName) >= 2.0); + gaugeCloseable.close(); + double observableGaugeValueAfterStop = getMaxObservableGaugeValue(exporter, metricName); + + // Sleep for about 1.2s to verify that the closed observable gauge no longer executes the value provider.
+ Thread.sleep(1200); + assertEquals(observableGaugeValueAfterStop, getMaxObservableGaugeValue(exporter, metricName), 0.0); + + } + + private static double getMaxObservableGaugeValue(InMemorySingletonMetricsExporter exporter, String metricName) { + List<MetricData> dataPoints = exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().contains(metricName)) + .collect(Collectors.toList()); + double maxValue = 0; + for (MetricData metricData : dataPoints) { + maxValue = Math.max(maxValue, ((DoublePointData) metricData.getDoubleGaugeData().getPoints().toArray()[0]).getValue()); + } + return maxValue; + } + @After public void reset() { InMemorySingletonMetricsExporter.INSTANCE.reset(); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index 297ae8873636f..000fd09d43c18 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -53,7 +53,9 @@ public List<Setting<?>> getSettings() { OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, - OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING + OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY ); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java index 8e23f724b4570..95ce6918fcb70 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java @@ -13,15 +13,21 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.List; import io.opentelemetry.exporter.logging.LoggingMetricExporter; import io.opentelemetry.exporter.logging.LoggingSpanExporter; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; /** * OTel specific telemetry settings. @@ -66,7 +72,7 @@ private OTelTelemetrySettings() {} /** * Span Exporter type setting. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "removal" }) public static final Setting<Class<SpanExporter>> OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING = new Setting<>( "telemetry.otel.tracer.span.exporter.class", LoggingSpanExporter.class.getName(), @@ -90,7 +96,7 @@ private OTelTelemetrySettings() {} /** * Metrics Exporter type setting.
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "removal" }) public static final Setting<Class<MetricExporter>> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>( "telemetry.otel.metrics.exporter.class", LoggingMetricExporter.class.getName(), @@ -110,4 +116,40 @@ private OTelTelemetrySettings() {} Setting.Property.NodeScope, Setting.Property.Final ); + + /** + * Sampler classes setting, applied in the order listed. + */ + @SuppressWarnings("unchecked") + public static final Setting<List<Class<Sampler>>> OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS = Setting.listSetting( + "telemetry.otel.tracer.span.sampler.classes", + Arrays.asList(ProbabilisticTransportActionSampler.class.getName(), ProbabilisticSampler.class.getName()), + sampler -> { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<Class<Sampler>>) () -> { + final ClassLoader loader = OTelSamplerFactory.class.getClassLoader(); + return (Class<Sampler>) loader.loadClass(sampler); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load sampler class: " + sampler, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Probability for the action-based sampler. + */ + public static final Setting<Double> TRACER_SAMPLER_ACTION_PROBABILITY = Setting.doubleSetting( + "telemetry.tracer.action.sampler.probability", + 0.001d, + 0.000d, + 1.00d, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java new file mode 100644 index 0000000000000..73bb0d8adff62 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleHistogram; + +/** + * OTel aware implementation of {@link Histogram} + */ +class OTelHistogram implements Histogram { + + private final DoubleHistogram otelDoubleHistogram; + + /** + * Constructor + * @param otelDoubleHistogram delegate histogram.
+ */ + public OTelHistogram(DoubleHistogram otelDoubleHistogram) { + this.otelDoubleHistogram = otelDoubleHistogram; + } + + @Override + public void record(double value) { + otelDoubleHistogram.record(value); + } + + @Override + public void record(double value, Tags tags) { + otelDoubleHistogram.record(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java index 6160e5106c041..6fe08040d7af5 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -9,17 +9,22 @@ package org.opensearch.telemetry.metrics; import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; import org.opensearch.telemetry.OTelTelemetryPlugin; +import org.opensearch.telemetry.metrics.tags.Tags; import java.io.Closeable; import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.function.Supplier; import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleHistogram; import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.api.metrics.ObservableDoubleGauge; import io.opentelemetry.sdk.OpenTelemetrySdk; /** @@ -42,6 +47,7 @@ public OTelMetricsTelemetry(RefCountedReleasable<OpenTelemetrySdk> openTelemetry this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } + @SuppressWarnings("removal") @Override public Counter createCounter(String name, String description, String unit) { DoubleCounter doubleCounter = AccessController.doPrivileged( @@ -54,6 +60,7 @@ public Counter createCounter(String name, String description, String unit) { return new OTelCounter(doubleCounter); } + @SuppressWarnings("removal") @Override public Counter createUpDownCounter(String name, String description, String unit) { DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged( @@ -66,6 +73,34 @@ public Counter createUpDownCounter(String name, String description, String unit) { return new OTelUpDownCounter(doubleUpDownCounter); } + /** + * Creates the OTel histogram. In {@link org.opensearch.telemetry.tracing.OTelResourceProvider} + * we can configure the bucketing/aggregation strategy through a view. The default strategy configured + * is {@link io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation}. + * @param name name of the histogram. + * @param description any description about the metric. + * @param unit unit of the metric.
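+ * (e.g. {@code "ms"}). A hedged usage sketch follows; the metric name and tag are invented: + * {@code createHistogram("request.latency", "request latency", "ms").record(42.0, Tags.create().addTag("index", "logs-1"))}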
+ * @return histogram + */ + @Override + public Histogram createHistogram(String name, String description, String unit) { + DoubleHistogram doubleHistogram = AccessController.doPrivileged( + (PrivilegedAction<DoubleHistogram>) () -> otelMeter.histogramBuilder(name).setUnit(unit).setDescription(description).build() + ); + return new OTelHistogram(doubleHistogram); + } + + @Override + public Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags) { + ObservableDoubleGauge doubleObservableGauge = AccessController.doPrivileged( + (PrivilegedAction<ObservableDoubleGauge>) () -> otelMeter.gaugeBuilder(name) + .setUnit(unit) + .setDescription(description) + .buildWithCallback(record -> record.record(valueProvider.get(), OTelAttributesConverter.convert(tags))) + ); + return () -> doubleObservableGauge.close(); + } + @Override public void close() throws IOException { meterProvider.close(); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java index ef5a31e4003ca..9c548044484fd 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java @@ -51,6 +51,7 @@ public static MetricExporter create(Settings settings) { return metricExporter; } + @SuppressWarnings("removal") private static MetricExporter instantiateExporter(Class<MetricExporter> exporterProviderClass) { try { // Check we ourselves are not being called by unprivileged code. diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index 14a19f122c17b..475fc09d04bff 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -12,7 +12,7 @@ import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; -import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; import org.opensearch.telemetry.tracing.sampler.RequestSampler; import java.security.AccessController; @@ -23,8 +23,12 @@ import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; import io.opentelemetry.context.propagation.ContextPropagators; import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.View; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; @@ -40,6 +44,7 @@ * This class encapsulates all OpenTelemetry related resources */ public final class OTelResourceProvider { + private 
OTelResourceProvider() {} /** @@ -48,13 +53,14 @@ private OTelResourceProvider() {} * @param settings cluster settings * @return OpenTelemetrySdk instance */ + @SuppressWarnings("removal") public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) { return AccessController.doPrivileged( (PrivilegedAction<OpenTelemetrySdk>) () -> get( settings, OTelSpanExporterFactory.create(settings), ContextPropagators.create(W3CTraceContextPropagator.getInstance()), - Sampler.parentBased(new RequestSampler(new ProbabilisticSampler(telemetrySettings))) + Sampler.parentBased(new RequestSampler(OTelSamplerFactory.create(telemetrySettings, settings))) ) ); } @@ -91,6 +97,10 @@ private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resou .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) .build() ) + .registerView( + InstrumentSelector.builder().setType(InstrumentType.HISTOGRAM).build(), + View.builder().setAggregation(Base2ExponentialHistogramAggregation.getDefault()).build() + ) .build(); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java index da7ce5c47d9ca..e9d7e78882c7d 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java @@ -51,6 +51,7 @@ public static SpanExporter create(Settings settings) { return spanExporter; } + @SuppressWarnings("removal") private static SpanExporter instantiateSpanExporter(Class<SpanExporter> spanExporterProviderClass) { try { // Check we ourselves are not being called by unprivileged code. diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java new file mode 100644 index 0000000000000..b9d5c07a40cd8 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.ListIterator; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +/** + * Factory class to create the instance of OTelSampler + */ +public class OTelSamplerFactory { + + /** + * Logger instance for logging messages related to the OTelSamplerFactory. + */ + private static final Logger logger = LogManager.getLogger(OTelSamplerFactory.class); + + /** + * Base constructor. 
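+ * Private: instances are obtained only through the static {@code create} method below.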
+ */ + private OTelSamplerFactory() { + + } + + /** + * Creates the {@link Sampler} chain based on the OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS value. + * + * @param telemetrySettings the telemetry settings. + * @param settings the settings + * @return the head of the sampler chain. + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings) { + List<Class<Sampler>> samplersNameList = OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.get(settings); + ListIterator<Class<Sampler>> li = samplersNameList.listIterator(samplersNameList.size()); + + Sampler fallbackSampler = null; + + // Iterate the samplers list in reverse order so each sampler is created with the next one as its fallback + while (li.hasPrevious()) { + Class<Sampler> samplerName = li.previous(); + fallbackSampler = instantiateSampler(samplerName, telemetrySettings, settings, fallbackSampler); + } + + return fallbackSampler; + } + + private static Sampler instantiateSampler( + Class<Sampler> samplerClassName, + TelemetrySettings telemetrySettings, + Settings settings, + Sampler fallbackSampler + ) { + try { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + + return AccessController.doPrivileged((PrivilegedExceptionAction<Sampler>) () -> { + try { + // Define the method type which receives TelemetrySettings, Settings & Sampler as arguments + MethodType methodType = MethodType.methodType(Sampler.class, TelemetrySettings.class, Settings.class, Sampler.class); + + return (Sampler) MethodHandles.publicLookup() + .findStatic(samplerClassName, "create", methodType) + .invokeExact(telemetrySettings, settings, fallbackSampler); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create method exists in [" + samplerClassName + "]", e.getCause()); + } else { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClassName + "]", e.getCause()); + } + } + }); + } catch (Exception e) { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClassName + "]", e.getCause()); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java index 774070aa39df6..d7fe92b1f3495 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing.sampler; +import org.opensearch.common.settings.Settings; import org.opensearch.telemetry.TelemetrySettings; import java.util.List; @@ -18,14 +19,18 @@ import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; import io.opentelemetry.sdk.trace.samplers.SamplingResult; /** - * ProbabilisticSampler implements a head-based sampling strategy based on provided settings. + * ProbabilisticSampler implements a probability sampling strategy based on the configured sampling ratio.
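+ * <p>A hedged sketch of the default chain OTelSamplerFactory builds (order follows OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS; the explicit wiring below is illustrative only): + * <pre>{@code + * Sampler chain = ProbabilisticTransportActionSampler.create(telemetrySettings, settings, + * ProbabilisticSampler.create(telemetrySettings, settings, null)); + * }</pre>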
*/ public class ProbabilisticSampler implements Sampler { private Sampler defaultSampler; private final TelemetrySettings telemetrySettings; + private final Settings settings; + private final Sampler fallbackSampler; + private double samplingRatio; /** @@ -33,21 +38,24 @@ public class ProbabilisticSampler implements Sampler { * * @param telemetrySettings Telemetry settings. */ - public ProbabilisticSampler(TelemetrySettings telemetrySettings) { + private ProbabilisticSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); this.samplingRatio = telemetrySettings.getSamplingProbability(); this.defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + this.fallbackSampler = fallbackSampler; } - Sampler getSampler() { - double newSamplingRatio = telemetrySettings.getSamplingProbability(); - if (isSamplingRatioChanged(newSamplingRatio)) { - synchronized (this) { - this.samplingRatio = newSamplingRatio; - defaultSampler = Sampler.traceIdRatioBased(samplingRatio); - } - } - return defaultSampler; + /** + * Create probabilistic sampler. + * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticSampler(telemetrySettings, settings, fallbackSampler); } private boolean isSamplingRatioChanged(double newSamplingRatio) { @@ -67,7 +75,19 @@ public SamplingResult shouldSample( Attributes attributes, List<LinkData> parentLinks ) { - return getSampler().shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + double newSamplingRatio = telemetrySettings.getSamplingProbability(); + if (isSamplingRatioChanged(newSamplingRatio)) { + synchronized (this) { + this.samplingRatio = newSamplingRatio; + defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + } + } + final SamplingResult result = defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } else { + return result; + } } @Override diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java new file mode 100644 index 0000000000000..93a8edaaaa760 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.List; +import java.util.Objects; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; + +/** + * ProbabilisticTransportActionSampler samples requests that carry a transport action, based on the configured probability + */ +public class ProbabilisticTransportActionSampler implements Sampler { + + private final Sampler fallbackSampler; + private Sampler actionSampler; + private final TelemetrySettings telemetrySettings; + private final Settings settings; + private double actionSamplingRatio; + + /** + * Creates a ProbabilisticTransportActionSampler. + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + */ + private ProbabilisticTransportActionSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); + this.actionSamplingRatio = OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY.get(settings); + this.actionSampler = Sampler.traceIdRatioBased(actionSamplingRatio); + this.fallbackSampler = fallbackSampler; + } + + /** + * Create probabilistic transport action sampler.
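+ * <p>Decision flow, sketched: a span carrying the {@code TRANSPORT_ACTION} attribute is decided by the action sampler first, and a non-drop decision is then confirmed by the fallback sampler when one is configured; spans without an action go straight to the fallback. An illustrative call, with an invented action name: + * <pre>{@code + * Attributes attrs = Attributes.builder().put(AttributeKey.stringKey(TRANSPORT_ACTION), "dummy_action").build(); + * SamplingResult result = sampler.shouldSample(Context.root(), traceId, "spanName", SpanKind.INTERNAL, attrs, Collections.emptyList()); + * }</pre>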
+ * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic transport action sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticTransportActionSampler(telemetrySettings, settings, fallbackSampler); + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List<LinkData> parentLinks + ) { + final String action = attributes.get(AttributeKey.stringKey(TRANSPORT_ACTION)); + if (action != null) { + final SamplingResult result = actionSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return result; + } + if (fallbackSampler != null) return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + + return SamplingResult.drop(); + } + + double getSamplingRatio() { + return actionSamplingRatio; + } + + @Override + public String getDescription() { + return "Transport Action Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java index 9ea681370a3ec..87c2849173aff 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java @@ -18,21 +18,20 @@ import io.opentelemetry.sdk.trace.samplers.Sampler; import io.opentelemetry.sdk.trace.samplers.SamplingResult; +import static org.opensearch.telemetry.tracing.AttributeNames.TRACE; + /** - * HeadBased sampler + * RequestSampler, a head-based sampler driven by the request's trace attribute */ public class RequestSampler implements Sampler { - private final Sampler defaultSampler; - - // TODO: Pick value of TRACE from PR #9415. - private static final String TRACE = "trace"; + private final Sampler fallbackSampler; /** - * Creates Head based sampler - * @param defaultSampler defaultSampler + * Creates a request sampler which delegates to the chain of applicable samplers + * @param fallbackSampler the fallback sampler */ - public RequestSampler(Sampler defaultSampler) { - this.defaultSampler = defaultSampler; + public RequestSampler(Sampler fallbackSampler) { + this.fallbackSampler = fallbackSampler; } @Override @@ -44,15 +43,15 @@ public SamplingResult shouldSample( Attributes attributes, List<LinkData> parentLinks ) { final String trace = attributes.get(AttributeKey.stringKey(TRACE)); if (trace != null) { return (Boolean.parseBoolean(trace) == true) ?
SamplingResult.recordAndSample() : SamplingResult.drop(); - } else { - return defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); } - + if (fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return SamplingResult.recordAndSample(); } @Override diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java index 2fcf89947e537..4a1301588dad2 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java @@ -30,9 +30,11 @@ import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY; import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; @@ -69,7 +71,9 @@ public void testGetTelemetry() { TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, - OTEL_METRICS_EXPORTER_CLASS_SETTING + OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTEL_METRICS_EXPORTER_CLASS_SETTING, + TRACER_SAMPLER_ACTION_PROBABILITY ), oTelTelemetryPlugin.getSettings() ); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java index 9de575b69774a..2e89a3c488d5c 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -14,16 +14,25 @@ import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.test.OpenSearchTestCase; +import java.io.Closeable; +import java.util.function.Consumer; + import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.metrics.DoubleCounter; import io.opentelemetry.api.metrics.DoubleCounterBuilder; +import io.opentelemetry.api.metrics.DoubleGaugeBuilder; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.DoubleHistogramBuilder; import io.opentelemetry.api.metrics.DoubleUpDownCounter; import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder; import io.opentelemetry.api.metrics.LongCounterBuilder; import io.opentelemetry.api.metrics.LongUpDownCounterBuilder; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.api.metrics.ObservableDoubleGauge; +import org.mockito.Mockito; +import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -118,4 +127,57 @@ public void testUpDownCounter() { counter.add(-2.0, tags); verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags)); } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testHistogram() { + String histogramName = "test-histogram"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleHistogram mockOTelDoubleHistogram = mock(DoubleHistogram.class); + DoubleHistogramBuilder mockOTelDoubleHistogramBuilder = mock(DoubleHistogramBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.histogramBuilder(Mockito.contains(histogramName))).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setDescription(description)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setUnit(unit)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.build()).thenReturn(mockOTelDoubleHistogram); + + Histogram histogram = metricsTelemetry.createHistogram(histogramName, description, unit); + histogram.record(1.0); + verify(mockOTelDoubleHistogram).record(1.0); + Tags tags = Tags.create().addTag("test", "test"); + histogram.record(2.0, tags); + verify(mockOTelDoubleHistogram).record(2.0, OTelAttributesConverter.convert(tags)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testGauge() throws Exception { + String observableGaugeName = "test-gauge"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + ObservableDoubleGauge observableDoubleGauge = mock(ObservableDoubleGauge.class); + DoubleGaugeBuilder mockOTelDoubleGaugeBuilder = mock(DoubleGaugeBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.gaugeBuilder(Mockito.contains(observableGaugeName))).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.setDescription(description)).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.setUnit(unit)).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.buildWithCallback(any(Consumer.class))).thenReturn(observableDoubleGauge); + + Closeable closeable = metricsTelemetry.createGauge(observableGaugeName, description, unit, () -> 1.0, Tags.EMPTY); + closeable.close(); + verify(observableDoubleGauge).close(); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java new file mode 100644 index 0000000000000..39ccf299dfdc4 --- /dev/null +++ 
b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class OTelSamplerFactoryTests extends OpenSearchTestCase { + + public void testDefaultCreate() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + assertEquals(sampler.getClass(), ProbabilisticTransportActionSampler.class); + } + + public void testCreateWithSingleSampler() { + Settings settings = Settings.builder() + .put(OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.getKey(), ProbabilisticSampler.class.getName()) + .build(); + + ClusterSettings clusterSettings = new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, settings); + assertTrue(sampler instanceof ProbabilisticSampler); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java index 639dc341ef0db..a094cd0119f5e 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java @@ -15,18 +15,21 @@ import java.util.Set; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.samplers.Sampler; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.mockito.Mockito.mock; public class ProbabilisticSamplerTests extends OpenSearchTestCase { // When ProbabilisticSampler is created with OTelTelemetrySettings as null public void testProbabilisticSamplerWithNullSettings() { // Verify that the constructor throws IllegalArgumentException when given null settings - assertThrows(NullPointerException.class, () -> { new ProbabilisticSampler(null); }); + assertThrows(NullPointerException.class, () -> { ProbabilisticSampler.create(null, null, null); }); } public void testDefaultGetSampler() { @@ -37,10 +40,9 @@ public void testDefaultGetSampler() { ); // 
Probabilistic Sampler - ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); - assertNotNull(probabilisticSampler.getSampler()); - assertEquals(0.01, probabilisticSampler.getSamplingRatio(), 0.0d); + assertEquals(0.01, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); } public void testGetSamplerWithUpdatedSamplingRatio() { @@ -51,14 +53,16 @@ public void testGetSamplerWithUpdatedSamplingRatio() { ); // Probabilistic Sampler - ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); - assertEquals(0.01d, probabilisticSampler.getSamplingRatio(), 0.0d); + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); + + assertEquals(0.01d, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); telemetrySettings.setSamplingProbability(0.02); + // Need to call shouldSample() to update the value of samplingRatio + probabilisticSampler.shouldSample(mock(Context.class), "00000000000000000000000000000000", "", SpanKind.INTERNAL, null, null); + - // Need to call getSampler() to update the value of tracerHeadSamplerSamplingRatio - Sampler updatedProbabilisticSampler = probabilisticSampler.getSampler(); - assertEquals(0.02, probabilisticSampler.getSamplingRatio(), 0.0d); + assertEquals(0.02, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); } - } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java new file mode 100644 index 0000000000000..261b0252fef60 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.Set; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; +import static org.mockito.Mockito.mock; + +public class ProbabilisticTransportActionSamplerTests extends OpenSearchTestCase { + + public void testGetSamplerWithActionSamplingRatio() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // ProbabilisticTransportActionSampler + Sampler probabilisticTransportActionSampler = ProbabilisticTransportActionSampler.create(telemetrySettings, Settings.EMPTY, null); + + SamplingResult result = probabilisticTransportActionSampler.shouldSample( + mock(Context.class), + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(), + Collections.emptyList() + ); + // Verify that ProbabilisticTransportActionSampler returned SamplingResult.recordAndSample() as all actions will be sampled + assertEquals(SamplingResult.recordAndSample(), result); + assertEquals(0.001, ((ProbabilisticTransportActionSampler) probabilisticTransportActionSampler).getSamplingRatio(), 0.000d); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java index facf04623ec46..da234ca13dc9d 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java @@ -8,9 +8,14 @@ package org.opensearch.telemetry.tracing.sampler; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Collections; +import java.util.Set; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -19,29 +24,29 @@ import io.opentelemetry.sdk.trace.samplers.Sampler; import io.opentelemetry.sdk.trace.samplers.SamplingResult; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import 
static org.mockito.Mockito.when; public class RequestSamplerTests extends OpenSearchTestCase { + private ClusterSettings clusterSettings; + private TelemetrySettings telemetrySettings; + private RequestSampler requestSampler; + private Context parentContext; + + @Before + public void init() { + clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler fallbackSampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + requestSampler = new RequestSampler(fallbackSampler); + parentContext = mock(Context.class); + } public void testShouldSampleWithTraceAttributeAsTrue() { - - // Create a mock default sampler - Sampler defaultSampler = mock(Sampler.class); - when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn(SamplingResult.drop()); - - // Create an instance of HeadSampler with the mock default sampler - RequestSampler requestSampler = new RequestSampler(defaultSampler); - - // Create a mock Context and Attributes - Context parentContext = mock(Context.class); Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "true"); - // Call shouldSample on HeadSampler SamplingResult result = requestSampler.shouldSample( parentContext, "traceId", @@ -50,43 +55,85 @@ public void testShouldSampleWithTraceAttributeAsTrue() { attributes, Collections.emptyList() ); - assertEquals(SamplingResult.recordAndSample(), result); + } + + public void testShouldSampleWithTraceAttributeAsFalse() { + Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "false"); - // Verify that the default sampler's shouldSample method was not called - verify(defaultSampler, never()).shouldSample(any(), anyString(), anyString(), any(), any(), any()); + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + assertEquals(SamplingResult.drop(), result); } - public void testShouldSampleWithoutTraceAttribute() { + public void testShouldSampleForProbabilisticSampler() { + clusterSettings.applySettings( + Settings.builder() + .put("telemetry.tracer.sampler.probability", "1.0") + .put("telemetry.otel.tracer.span.sampler.classes", "org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler") + .build() + ); + + Attributes attributes = Attributes.builder().build(); + + SamplingResult result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); - // Create a mock default sampler - Sampler defaultSampler = mock(Sampler.class); - when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn( - SamplingResult.recordAndSample() + // Verify that request is sampled + assertEquals(SamplingResult.recordAndSample(), result); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.0").build()); + result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() ); + assertEquals(SamplingResult.drop(), result); - // Create an instance of HeadSampler with the mock default sampler - RequestSampler requestSampler = new RequestSampler(defaultSampler); + } - // Create a mock Context and Attributes + public void 
testShouldSampleForProbabilisticTransportActionSampler() { + clusterSettings.applySettings( + Settings.builder() + .put( + "telemetry.otel.tracer.span.sampler.classes", + "org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler" + ) + .build() + ); + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.action.sampler.probability", "1.0").build()); + + // Create a mock Context and Attributes with dummy action Context parentContext = mock(Context.class); - Attributes attributes = Attributes.empty(); + Attributes attributes = Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(); - // Call shouldSample on HeadSampler + // Calling shouldSample to update samplingRatio SamplingResult result = requestSampler.shouldSample( parentContext, - "traceId", + "00000000000000000000000000000000", "spanName", SpanKind.INTERNAL, attributes, Collections.emptyList() ); - // Verify that HeadSampler returned SamplingResult.recordAndSample() + // Verify that request is sampled assertEquals(SamplingResult.recordAndSample(), result); - - // Verify that the default sampler's shouldSample method was called - verify(defaultSampler).shouldSample(any(), anyString(), anyString(), any(), any(), any()); } } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 deleted file mode 100644 index 30f215e47f8ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 deleted file mode 100644 index 9ed9b896d4b4e..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 0232fc58f9357..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 deleted file mode 100644 index 478e7cfba1470..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 deleted file mode 100644 index f0242709f34f7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 1b533eea3b3b3..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 deleted file mode 100644 index 70777be4dc636..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 deleted file mode 100644 index d7c15af9312fe..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 deleted file mode 100644 index 987b524aedc98..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 deleted file mode 100644 index 5cacaf11a29ce..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 deleted file mode 100644 index 64797bf11aedc..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 deleted file mode 100644 index 5eaf96739ed72..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 deleted file mode 100644 index 091125169c696..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java 
b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java index db532f9a1c503..4ea23e415c994 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java @@ -50,6 +50,7 @@ * Unit tests for OpenSearchPolicy: these cannot run with security manager, * we don't allow messing with the policy */ +@SuppressWarnings("removal") public class OpenSearchPolicyUnitTests extends OpenSearchTestCase { /** * Test policy with null codesource. diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java index 56d6c72705a78..99c9ee7e96d01 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java @@ -41,6 +41,7 @@ public class SystemCallFilterTests extends OpenSearchTestCase { /** command to try to run in tests */ static final String EXECUTABLE = Constants.WINDOWS ? "calc" : "ls"; + @SuppressWarnings("removal") @Override public void setUp() throws Exception { super.setUp(); diff --git a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java index 430df1f899708..04eae95f6fe12 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; /** Tests plugin manager security check */ +@SuppressWarnings("removal") public class PluginSecurityTests extends OpenSearchTestCase { /** Test that we can parse the set of permissions correctly for a simple policy */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java new file mode 100644 index 0000000000000..d4e7017aab8c2 --- /dev/null +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.upgrades; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends AbstractFullClusterRestartTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + // Any issue in PluginInfo serialization logic will result in connection failures + // and hence a reduced number of nodes.
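+ // The full-cluster-restart test fixture is expected to start a two-node cluster, hence the node count of 2 asserted below.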
+ assertEquals(2, nodeMap.keySet().size()); + } +} diff --git a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java index 73c546b80d431..c2f799d7d48d2 100644 --- a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -62,6 +62,7 @@ public void testSuccessfulStartupWithCustomConfig() throws Exception { }); } + @SuppressWarnings("removal") private List<String> readAllLines(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<List<String>>) () -> { try { diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java new file mode 100644 index 0000000000000..47e454a7549cb --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.backwards; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends OpenSearchRestTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + assertEquals(4, nodeMap.keySet().size()); + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f963f8d221bb5..8e8734b5d62b3 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -40,10 +40,10 @@ import org.opensearch.common.Booleans; import org.opensearch.common.io.Streams; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.EngineConfig; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -262,7 +262,6 @@ public void testIndexing() throws Exception { * @throws Exception if index creation fail * @throws UnsupportedOperationException if cluster type is unknown */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679") public void testIndexingWithSegRep() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); @@ -345,6 +344,88 @@ public void testIndexingWithSegRep() throws Exception { } } + public void testIndexingWithFuzzyFilterPostings() throws Exception { + if (UPGRADE_FROM_VERSION.onOrBefore(Version.V_2_11_1)) { + 
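// The fuzzy filter postings format (a bloom filter over doc ids) is only available from 2.12.0 onward, so skip when upgrading from older versions. +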
logger.info("--> Skip test for version {} where fuzzy filter postings format feature is not available", UPGRADE_FROM_VERSION); + return; + } + final String indexName = "test-index-fuzzy-set"; + final int shardCount = 3; + final int replicaCount = 1; + logger.info("--> Case {}", CLUSTER_TYPE); + printClusterNodes(); + logger.info("--> _cat/shards before test execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); + switch (CLUSTER_TYPE) { + case OLD: + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), replicaCount) + .put( + EngineConfig.INDEX_CODEC_SETTING.getKey(), + randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + }) + ) + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); + createIndex(indexName, settings.build()); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + bulk(indexName, "_OLD", 5); + break; + case MIXED: + waitForClusterHealthWithNoShardMigration(indexName, "yellow"); + break; + case UPGRADED: + Settings.Builder settingsBuilder = Settings.builder() + .put(IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true); + updateIndexSettings(indexName, settingsBuilder); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount(indexName, expectedCount); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk(indexName, "_" + CLUSTER_TYPE, 5); + logger.info("--> Index one doc (to be deleted next) and verify doc count"); + Request toBeDeleted = new Request("PUT", "/" + indexName + "/_doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount(indexName, expectedCount + 6); + + logger.info("--> Delete previously added doc and verify doc count"); + Request delete = new Request("DELETE", "/" + indexName + "/_doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + assertCount(indexName, expectedCount + 5); + + //forceMergeAndVerify(indexName, shardCount * (1 + replicaCount)); + } + } + public void testAutoIdWithOpTypeCreate() throws IOException { final String indexName = "auto_id_and_op_type_create_index"; StringBuilder b = new StringBuilder(); diff --git a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java index d14c834405f32..8a6e5d62112c8 100644 --- a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -61,6 +61,7 @@ protected Matcher<String> nodeNameMatcher() { return equalTo(HOSTNAME); } + 
@SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> { diff --git a/release-notes/opensearch.release-notes-1.3.15.md b/release-notes/opensearch.release-notes-1.3.15.md new file mode 100644 index 0000000000000..a5b446ad1ec49 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.15.md @@ -0,0 +1,5 @@ +## 2024-03-01 Version 1.3.15 Release Notes + +### Upgrades +- Bump `netty` from 4.1.100.Final to 4.1.107.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775), [#12034](https://github.com/opensearch-project/OpenSearch/pull/12034), [#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `org.apache.kerby:kerb-admin` from 1.0.1 to 2.0.3 ([#12194](https://github.com/opensearch-project/OpenSearch/pull/12194)) diff --git a/release-notes/opensearch.release-notes-2.12.0.md b/release-notes/opensearch.release-notes-2.12.0.md new file mode 100644 index 0000000000000..49955c1f969f0 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.12.0.md @@ -0,0 +1,175 @@ +## 2024-02-09 Version 2.12.0 Release Notes + +## [2.12.0] +### Added +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- [Remote Store] Add repository stats for remote store ([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- [Remote cluster state] Upload global metadata in cluster state to remote store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- `remove` ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967), [#11983](https://github.com/opensearch-project/OpenSearch/pull/11983)) +- [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) +- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache
with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Enable must_exist parameter for update aliases API ([#11210](https://github.com/opensearch-project/OpenSearch/pull/11210)) +- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) +- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) +- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) +- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) +- Add experimental SIMD implementation of B-tree to round down dates ([#11194](https://github.com/opensearch-project/OpenSearch/issues/11194)) +- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329)) +- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) +- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378)) +- Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) +- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations ([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) +- Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870)) +- Introduce
new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) +- Add remove_by_pattern ingest processor ([#11920](https://github.com/opensearch-project/OpenSearch/pull/11920)) +- Support index level allocation filtering for searchable snapshot index ([#11563](https://github.com/opensearch-project/OpenSearch/pull/11563)) +- Add `org.opensearch.rest.MethodHandlers` and `RestController#getAllHandlers` ([#11876](https://github.com/opensearch-project/OpenSearch/pull/11876)) +- New DateTime format for RFC3339 compatible date fields ([#11465](https://github.com/opensearch-project/OpenSearch/pull/11465)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Add support for Google Application Default Credentials in repository-gcs ([#8394](https://github.com/opensearch-project/OpenSearch/pull/8394)) +- Remove concurrent segment search feature flag for GA launch ([#12074](https://github.com/opensearch-project/OpenSearch/pull/12074)) +- Enable Fuzzy codec for doc id fields using a bloom filter ([#11022](https://github.com/opensearch-project/OpenSearch/pull/11022)) +- [Metrics Framework] Adds support for Histogram metric ([#12062](https://github.com/opensearch-project/OpenSearch/pull/12062)) + +### Dependencies +- Bump `jetty` version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump `Lucene` from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560), [#11796](https://github.com/opensearch-project/OpenSearch/pull/11796)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.8.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630), [#12167](https://github.com/opensearch-project/OpenSearch/pull/12167)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0
([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695)) +- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) +- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) +- Bump `actions/github-script` from 6 to 7.0.1 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271), [#12166](https://github.com/opensearch-project/OpenSearch/pull/12166)) +- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) +- Bump `netty` from 4.1.100.Final to 4.1.106.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775), [#12034](https://github.com/opensearch-project/OpenSearch/pull/12034)) +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) +- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) +- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) +- Bump `reactor-netty-core` from 1.1.12 to 1.1.15 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350), [#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.2 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629), [#12056](https://github.com/opensearch-project/OpenSearch/pull/12056)) +- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) +- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) +- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448)) +- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521)) +- Bump `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539)) +- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555)) +- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) +- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0
([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) +- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) +- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) +- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) +- Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) +- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) +- Bump `Lucene` from 9.8.0 to 9.9.2 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421), [#12063](https://github.com/opensearch-project/OpenSearch/pull/12063)) +- Bump `com.networknt:json-schema-validator` from 1.0.86 to 1.2.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886), [#11963](https://github.com/opensearch-project/OpenSearch/pull/11963)) +- Bump `com.google.api:gax-httpjson` from 0.103.1 to 2.42.0 ([#11794](https://github.com/opensearch-project/OpenSearch/pull/11794), [#12165](https://github.com/opensearch-project/OpenSearch/pull/12165)) +- Bump `com.google.oauth-client:google-oauth-client` from 1.34.1 to 1.35.0 ([#11960](https://github.com/opensearch-project/OpenSearch/pull/11960)) +- Bump `com.diffplug.spotless` from 6.23.2 to 6.25.0 ([#11962](https://github.com/opensearch-project/OpenSearch/pull/11962), [#12055](https://github.com/opensearch-project/OpenSearch/pull/12055)) +- Bump `com.google.cloud:google-cloud-core` from 2.5.10 to 2.30.0 ([#11961](https://github.com/opensearch-project/OpenSearch/pull/11961)) +- Bump `reactor-core` from 3.5.11 to 3.5.14 ([#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `org.apache.shiro:shiro-core` from 1.11.0 to 1.13.0 ([#12200](https://github.com/opensearch-project/OpenSearch/pull/12200)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.3 to 1.44.1 ([#12059](https://github.com/opensearch-project/OpenSearch/pull/12059)) +- Bump `peter-evans/create-issue-from-file` from 4 to 5 ([#12057](https://github.com/opensearch-project/OpenSearch/pull/12057)) +- Bump `org.gradle.test-retry` from 1.5.4 to 1.5.8 ([#12168](https://github.com/opensearch-project/OpenSearch/pull/12168)) +- Bump `org.apache.kerby:kerb-admin` from 1.0.1 to 2.0.3 ([#12194](https://github.com/opensearch-project/OpenSearch/pull/12194)) + +### Changed +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562), [#11751](https://github.com/opensearch-project/OpenSearch/pull/11751)) +- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) +- Search pipelines now support asynchronous
request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) +- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) +- Apply the fast filter optimization to composite aggregation of date histogram source ([#11505](https://github.com/opensearch-project/OpenSearch/pull/11083)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) +- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) +- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) +- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) +- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) +- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) +- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) +- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) +- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) +- Added support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) +- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890)) +- Extract cluster management for
integration tests out of OpenSearchIntegTestCase into a JUnit test rule ([#11877](https://github.com/opensearch-project/OpenSearch/pull/11877), [#12000](https://github.com/opensearch-project/OpenSearch/pull/12000)) +- Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2 ([#11968](https://github.com/opensearch-project/OpenSearch/pull/11968)) +- Updates IpField to be searchable when only `doc_values` are enabled ([#11508](https://github.com/opensearch-project/OpenSearch/pull/11508)) +- [Query Insights] Query Insights Framework which currently supports retrieving the most time-consuming queries within the last configured time window ([#11903](https://github.com/opensearch-project/OpenSearch/pull/11903)) +- [Query Insights] Implement Top N Queries feature to collect and gather information about high latency queries in a window ([#11904](https://github.com/opensearch-project/OpenSearch/pull/11904)) +- Add override support for sampling based on action ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Added custom sampler support based on transport action in request ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Disable concurrent search for composite aggregation ([#12375](https://github.com/opensearch-project/OpenSearch/pull/12375)) + +### Removed +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) + +### Fixed +- Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) +- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) +- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) +- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543)) +- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) +- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) +- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) +- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) +- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) +- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) +- Fix parsing of flat object fields with dots in keys
([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) +- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) +- Fix noop_update_total metric in indexing stats not being updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485), [#11917](https://github.com/opensearch-project/OpenSearch/pull/11917)) +- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) +- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) +- Fix automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) +- Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) +- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) +- Fix parsing of single line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815)) +- Fix memory leak issue in ReorganizingLongHash ([#11953](https://github.com/opensearch-project/OpenSearch/issues/11953)) +- Prevent setting remote_snapshot store type on index creation ([#11867](https://github.com/opensearch-project/OpenSearch/pull/11867)) +- [BUG] Fix remote shards balancer when filtering throttled nodes ([#11724](https://github.com/opensearch-project/OpenSearch/pull/11724)) +- Add advance(int) for numeric values in order to allow point based optimization to kick in ([#12089](https://github.com/opensearch-project/OpenSearch/pull/12089)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 02fbcc36dfe64..986bce55f41e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -63,6 +63,10 @@ "wait_for_completion": { "type" : "boolean", "description" : "If false, the request will return a task immediately and the operation will run in background. Defaults to true." + }, + "primary_only": { + "type" : "boolean", + "description" : "Specify whether the operation should be performed only on primary shards. Defaults to false."
} } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e0fbeeb83ffc4..e78d49a67a98a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -229,6 +229,11 @@ "search_pipeline": { "type": "string", "description": "The search pipeline to use to execute this request" + }, + "include_named_queries_score":{ + "type": "boolean", + "description":"Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the names of the matched queries (false)", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index b572ed9e62ea9..29fbf55417961 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,7 +1,7 @@ "Help": - skip: version: " - 2.11.99" - reason: deleted docs added in 2.12.0 + reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: @@ -66,6 +66,10 @@ search.query_current .+ \n search.query_time .+ \n search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n search.scroll_current .+ \n search.scroll_time .+ \n search.scroll_total .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 00ec838489f63..39c8040993f2a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -11,6 +11,33 @@ / #node_name name active queue rejected ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + - do: + cat.thread_pool: + thread_pool_patterns: search,search_throttled,generic + h: name,total_wait_time,twt + v: true + + - match: + $body: | + /^ name \s+ total_wait_time \s+ twt \n + (generic \s+ -1 \s+ -1 \n + search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ + +--- +"Test cat thread_pool total_wait_time output with concurrent search thread_pool": + - skip: + version: " - 2.11.99" + reason: index_search thread_pool was introduced in V_2.12.0 + + - do: + cat.thread_pool: {} + + - match: + $body: | + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + - do: cat.thread_pool: thread_pool_patterns: search,search_throttled,index_searcher,generic @@ -21,6 +48,7 @@ $body: | /^ name \s+ total_wait_time \s+ twt \n (generic \s+ -1 \s+ -1 \n + index_searcher \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index d62c4c8882b13..39fb1604d9596 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -27,3 +27,23 @@ index: test max_num_segments: 10 only_expunge_deletes: true + +--- +"Test primary_only parameter": + - skip: + version: " - 2.99.99" + reason: "primary_only is available in 3.0+" + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 1 + + - do: + indices.forcemerge: + index: test + primary_only: true + - match: { _shards.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml index 9561ecd89fdad..efa239547e84a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml @@ -25,7 +25,7 @@ wait_for_completion: true task_id: $taskId - match: { task.action: "indices:admin/forcemerge" } - - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" } + - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } # .tasks index is created when the force-merge operation completes, so we should delete .tasks index finally, # if not, the .tasks index may introduce unexpected warnings and then cause other test cases to fail. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml index a36f807e63e0e..a65908b238013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml @@ -1,9 +1,5 @@ --- "Return empty object if field doesn't exist, but index does": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/opensearch-project/OpenSearch/issues/2440" - - do: indices.create: index: test_index @@ -18,7 +14,5 @@ indices.get_field_mapping: index: test_index fields: not_existent - ignore: 404 # ignore 404 failures for now - # see: https://github.com/opensearch-project/OpenSearch/issues/2440 - match: { 'test_index.mappings': {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml index dd8c2a2deb721..2192873623715 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml @@ -23,8 +23,8 @@ setup: --- "Test noop_update_total metric can be updated by both update API and bulk API": - skip: - version: " - 2.99.99" #TODO: change to 2.11.99 after the PR is backported to 2.x branch - reason: "fixed in 3.0" + version: " - 2.11.99" + reason: "fixed in 2.12.0" - do: update: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 09278690f5d05..2808be8cd7045 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -509,6 +509,134 @@ setup: - match: { aggregations.1.2.buckets.1.key.nested: 1000 } - match: { aggregations.1.2.buckets.1.doc_count: 1 } +--- +"Composite aggregation with filtered nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 2 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 10 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + - match: { aggregations.1.2.3.buckets.1.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.1.doc_count: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + after: { "nested": 10 } + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 1 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + +--- +"Composite aggregation with filtered reverse nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 20 + aggs: + 3: + reverse_nested: {} + aggs: + 4: + composite: + sources: [ + { + "long": { + "terms": { + "field": "long" + } + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.4.buckets: 4 } + - match: { aggregations.1.2.3.4.buckets.0.key.long: 0 } + - match: { aggregations.1.2.3.4.buckets.0.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.0.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.1.key.long: 10 } + - match: { aggregations.1.2.3.4.buckets.1.key.kw: "foo" } + - match: { aggregations.1.2.3.4.buckets.1.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.2.key.long: 20 } + - match: { aggregations.1.2.3.4.buckets.2.key.kw: "foo" } + - match: { aggregations.1.2.3.4.buckets.2.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.3.key.long: 100 } + - match: { aggregations.1.2.3.4.buckets.3.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.3.doc_count: 1 } + --- "Composite aggregation with unmapped field": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index a18ac45e62175..39fbf9bbe970e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -78,3 +78,15 @@ setup: index: test1 body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}}} - match: { error.root_cause.0.type: "illegal_argument_exception" } + +--- +"Plain highlighter on a field WITHOUT OFFSETS 
using max_analyzer_offset should SUCCEED": + - skip: + version: " - 2.12.99" + reason: the plain highlighter only started supporting the max_analyzer_offset parameter in version 2.13 + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field1" : "quick"}}, "highlight" : {"type" : "plain", "fields" : {"field1" : {"max_analyzer_offset": 10}}}} + - match: {hits.hits.0.highlight.field1.0: "The <em>quick</em> "} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index c7b00d5fbbef2..d5ece1719dc48 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1,8 +1,3 @@ -setup: - - skip: - features: [ "headers" ] - version: " - 2.11.99" - reason: "searching with only doc_values was added in 2.12.0" --- "search on fields with both index and doc_values enabled": - do: @@ -47,6 +42,10 @@ type: unsigned_long index: true doc_values: true + ip_field: + type: ip + index: true + doc_values: true - do: bulk: @@ -54,11 +53,11 @@ refresh: true body: - '{"index": {"_index": "test-iodvq", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' + - '{ "index": { "_index": "test-iodvq", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-iodvq", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -162,7 +161,6 @@ - match: { hits.total: 1 } - - do: search: rest_total_hits_as_int: true @@ -174,6 +172,16 @@ - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} - do: search: @@ -186,7 +194,6 @@ - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -264,6 +271,17 @@ - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + ip_field: ["192.168.0.1", "192.168.0.2"] + + - match: { hits.total: 2 } + - do: search: rest_total_hits_as_int: true @@ -384,6 +402,19 @@ - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: + 
ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } + --- "search on fields with only index enabled": - do: @@ -428,6 +459,10 @@ setup: type: unsigned_long index: true doc_values: false + ip_field: + type: ip + index: true + doc_values: false - do: bulk: @@ -435,11 +470,11 @@ setup: refresh: true body: - '{"index": {"_index": "test-index", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' - '{ "index": { "_index": "test-index", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-index", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -465,7 +500,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -555,6 +589,16 @@ setup: - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} - do: search: @@ -567,7 +611,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -645,6 +688,17 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + ip_field: ["192.168.0.1", "192.168.0.2"] + + - match: { hits.total: 2 } + - do: search: rest_total_hits_as_int: true @@ -765,8 +819,24 @@ setup: - match: { hits.total: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } --- "search on fields with only doc_values enabled": + - skip: + features: [ "headers" ] + version: " - 2.99.99" + reason: "searching with only doc_values was added in 3.0.0" - do: indices.create: index: test-doc-values @@ -809,6 +879,10 @@ setup: type: unsigned_long index: false doc_values: true + ip_field: + type: ip + index: false + doc_values: true - do: bulk: @@ -816,11 +890,11 @@ setup: refresh: true body: - '{"index": {"_index": "test-doc-values", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 
13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' - '{ "index": { "_index": "test-doc-values", "_id": "2" }}' - - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' - - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' - do: search: @@ -846,7 +920,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -924,7 +997,6 @@ setup: - match: { hits.total: 1 } - - do: search: rest_total_hits_as_int: true @@ -936,6 +1008,16 @@ setup: - match: { hits.total: 1 } + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + ip_field: "192.168.0.3" + + - match: { hits.total: 1 } - do: search: @@ -948,7 +1030,6 @@ setup: - match: { hits.total: 2 } - - do: search: rest_total_hits_as_int: true @@ -1145,3 +1226,16 @@ setup: } - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml new file mode 100644 index 0000000000000..08a20df093c01 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml @@ -0,0 +1,103 @@ +setup: + - skip: + version: " - 2.12.99" + reason: "implemented for versions 2.13.0 and above" + +--- +"matched queries": + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: match_field_2, + boost: 10 + } + } + } + ] + } + + - match: {hits.total.value: 2} + - length: {hits.hits.0.matched_queries: 2} + - match: {hits.hits.0.matched_queries: [ "match_field_1", "match_field_2" ]} + - length: {hits.hits.1.matched_queries: 1} + - match: {hits.hits.1.matched_queries: [ "match_field_1" ]} + +--- + +"matched queries with scores": + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + include_named_queries_score: true + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: 
match_field_2, + boost: 10 + } + } + } + ] + } + + - match: { hits.total.value: 2 } + - length: { hits.hits.0.matched_queries: 2 } + - match: { hits.hits.0.matched_queries.match_field_1: 1 } + - match: { hits.hits.0.matched_queries.match_field_2: 10 } + - length: { hits.hits.1.matched_queries: 1 } + - match: { hits.hits.1.matched_queries.match_field_1: 1 } diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..9cab77f4e7394 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +6f0cfa07a5e4b36423e398cd1fd51c6825773d9c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 deleted file mode 100644 index c9e6120da7497..0000000000000 --- a/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24c8401b530308f9568eb7b408c2029c63f564c6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2d5d1a281a0f0 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +ab201b997c8449db1ecd2fa88bd42d2f457286fa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 deleted file mode 100644 index 69ecf6aa68200..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11c46007366bb037be7d271ab0a5849b1d544662 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/server/licenses/lucene-core-9.9.1.jar.sha1 b/server/licenses/lucene-core-9.9.1.jar.sha1 deleted file mode 100644 index ae596196d9e6a..0000000000000 --- a/server/licenses/lucene-core-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..27d4f0f5874e9 --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a988f92842e48195c75a49377432533c9170d93d \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.9.1.jar.sha1 b/server/licenses/lucene-grouping-9.9.1.jar.sha1 deleted file mode 100644 index e7df056400661..0000000000000 --- a/server/licenses/lucene-grouping-9.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f2785e17c5c823cc8f41a7ddb4647aaca8ee773 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2545822f2ac7b --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7c669e2c01565d3bdf175cd61a1e4d0bdfc44311 \ No newline at end of 
file
diff --git a/server/licenses/lucene-highlighter-9.9.1.jar.sha1 b/server/licenses/lucene-highlighter-9.9.1.jar.sha1
deleted file mode 100644
index 828c7294aa586..0000000000000
--- a/server/licenses/lucene-highlighter-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-30928513461bf79a5cb057e84da7d34a1e53227d
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..fe44ad772335f
--- /dev/null
+++ b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+633a6d44b4cde8e149daa3407e8b8f644eece951
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.9.1.jar.sha1 b/server/licenses/lucene-join-9.9.1.jar.sha1
deleted file mode 100644
index 34b44ca8c6ad5..0000000000000
--- a/server/licenses/lucene-join-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b9c8cc99632280148f92b4c0a64111c482d5d0ac
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..85bfbe066ff56
--- /dev/null
+++ b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+25390259c9e5592354efbc2f250bb396402016b2
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.9.1.jar.sha1 b/server/licenses/lucene-memory-9.9.1.jar.sha1
deleted file mode 100644
index b75fba4c331e9..0000000000000
--- a/server/licenses/lucene-memory-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49f820b1b321860fa42a4f7583e8ed8f77b9c1c2
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..4dba5837b66de
--- /dev/null
+++ b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+056f87a3d30c223b08d2f45fe465ddf11210b85f
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.9.1.jar.sha1 b/server/licenses/lucene-misc-9.9.1.jar.sha1
deleted file mode 100644
index f1e1e056004e9..0000000000000
--- a/server/licenses/lucene-misc-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-db7c30217602dfcda394a4d0f0a9e68140d385a6
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..b6b8c441eefb1
--- /dev/null
+++ b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+e640f850b4fb13190be8422fe74c14c9d6603bb5
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.9.1.jar.sha1 b/server/licenses/lucene-queries-9.9.1.jar.sha1
deleted file mode 100644
index 888b9b4a05ec8..0000000000000
--- a/server/licenses/lucene-queries-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d157547bd24edc8e9d9d59c273107dc3ac5fde5e
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..d0e77b04db51a
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+8eb57762bf408fa51d7511f5e3b917627be61d1d
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.9.1.jar.sha1 b/server/licenses/lucene-queryparser-9.9.1.jar.sha1
deleted file mode 100644
index 1ce8a069a0f4e..0000000000000
--- a/server/licenses/lucene-queryparser-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-12d844fe224f6f97c510ac20d68903ed7f626f6c
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..179df9f07a594
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+615b4a90c5402084c2d5916a4c1fadc9d9177782
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.9.1.jar.sha1 b/server/licenses/lucene-sandbox-9.9.1.jar.sha1
deleted file mode 100644
index 14fd86dadc404..0000000000000
--- a/server/licenses/lucene-sandbox-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-272e588fd3d8c0a401b28a1ac715f27044bf62ec
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..9b88b24c21b12
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+19b42cdb5f76f63dece3ef5128207ebdd3741d48
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1
deleted file mode 100644
index 0efd5a7595bfe..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e066432e7ab02b2a4914f989bcd8c44adbf340ad
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..789ab1d52ea8c
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+882691fe917e716fe6bcf8c0dd984b153495d015
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.9.1.jar.sha1
deleted file mode 100644
index 7f06466e4c721..0000000000000
--- a/server/licenses/lucene-spatial3d-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fa54c9b962778e28ebc0efb9f75297781350361a
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1
new file mode 100644
index 0000000000000..8cfaf60763724
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1
@@ -0,0 +1 @@
+ef6d483960f776d5dbdd1009863786ee09ba5707
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.9.1.jar.sha1 b/server/licenses/lucene-suggest-9.9.1.jar.sha1
deleted file mode 100644
index 06732480d1b6c..0000000000000
--- a/server/licenses/lucene-suggest-9.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9554de5b22ae7483b344b94a9a956960b7a5d49c
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.11.jar.sha1 b/server/licenses/reactor-core-3.5.11.jar.sha1
deleted file mode 100644
index e5ffdbc8a7840..0000000000000
--- a/server/licenses/reactor-core-3.5.11.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-db2299757f562261eb775d13658e86ff06f91e8a
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1
new file mode 100644
index 0000000000000..02df47ed58b9d
--- /dev/null
+++ b/server/licenses/reactor-core-3.5.15.jar.sha1
@@ -0,0 +1 @@
+4e07a24c671235a2a806e75e9b8ff23d7d1db3d4 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java index 5605e4872887a..c81d491719e4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java @@ -449,6 +449,7 @@ public void onFailure(Exception e) { } } + @SuppressWarnings("removal") private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); try { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index c4dcedcc722cf..bdb36b62ada21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -52,7 +52,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -69,7 +68,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +98,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -public class CancellableTasksIT extends ParameterizedOpenSearchIntegTestCase { +public class CancellableTasksIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int idGenerator = 0; static final Map<TestRequest, CountDownLatch> beforeSendLatches = ConcurrentCollections.newConcurrentMap(); @@ -107,8 +106,8 @@ public class CancellableTasksIT extends ParameterizedOpenSearchIntegTestCase { static final Map<TestRequest, CountDownLatch> beforeExecuteLatches = ConcurrentCollections.newConcurrentMap(); static final Map<TestRequest, CountDownLatch> completedLatches = ConcurrentCollections.newConcurrentMap(); - public CancellableTasksIT(Settings dynamicSettings) { - super(dynamicSettings); + public CancellableTasksIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -119,11 +118,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void resetTestStates() { idGenerator = 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index e6fd9139d45f2..8b3c40c43e2d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -15,9 +15,9 @@ import org.opensearch.common.settings.FeatureFlagSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.SearchService; import org.opensearch.tasks.TaskInfo; import org.hamcrest.MatcherAssert; @@ -44,6 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put("thread_pool.index_searcher.size", INDEX_SEARCHER_THREADS) .put("thread_pool.index_searcher.queue_size", INDEX_SEARCHER_THREADS) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) .build(); } @@ -66,7 +67,6 @@ protected Settings featureFlagSettings() { for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); } - featureSettings.put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true); return featureSettings.build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index a081110e6c5a1..f50e8fd0a38cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -50,6 +50,8 @@ import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.VersionUtils; +import java.util.concurrent.ExecutionException; + import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -130,4 +132,61 @@ public void testCreateCloneIndex() { } + public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = 1; + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = 2; + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
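+ // Clarifying note: despite the wording above, the update-settings call that follows does not
+ // relocate shards; it sets "index.blocks.write" on the source index, making it read-only. A
+ // write block on the source is the precondition for any resize, including the CLONE used here.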
+ client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + setFailRate(REPOSITORY_NAME, 100); + + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setWaitForActiveShards(0) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get(); + + Thread.sleep(2000); + ensureYellow("target"); + + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } finally { + setFailRate(REPOSITORY_NAME, 0); + ensureGreen(); + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java index 09af533292e9a..5090af1706d5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -100,6 +100,24 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(primaryForceMergeUUID, is(replicaForceMergeUUID)); } + public void testForceMergeOnlyOnPrimaryShards() throws IOException { + internalCluster().ensureAtLeastNumDataNodes(2); + final String index = "test-index"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen(index); + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(index) + .setMaxNumSegments(1) + .setPrimaryOnly(true) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); + } + private static String getForceMergeUUID(IndexShard indexShard) throws IOException { try (GatedCloseable<IndexCommit> wrappedIndexCommit = indexShard.acquireLastIndexCommit(true)) { return wrappedIndexCommit.get().getUserData().get(Engine.FORCE_MERGE_UUID_KEY); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java new file mode 100644 index 0000000000000..85c70e098652c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.cluster.metadata.View; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 2) +public class ViewIT extends ViewTestBase { + + public void testCreateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = randomAlphaOfLength(8); + + logger.info("Testing createView with valid parameters"); + final View view = createView(viewName, indexPattern).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(1)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + + logger.info("Testing createView with existing view name"); + final Exception ex = assertThrows(ViewAlreadyExistsException.class, () -> createView(viewName, randomAlphaOfLength(8))); + MatcherAssert.assertThat(ex.getMessage(), is("View [" + viewName + "] already exists")); + } + + public void testCreateViewTargetsSet() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = "a" + randomAlphaOfLength(8); + final String indexPattern2 = "b" + randomAlphaOfLength(8); + final List<String> targetPatterns = List.of(indexPattern2, indexPattern, indexPattern); + + logger.info("Testing createView with targets that will be reordered and deduplicated"); + final View view = createView(viewName, targetPatterns).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(2)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + MatcherAssert.assertThat(view.getTargets().last().getIndexPattern(), is(indexPattern2)); + } + + public void testGetView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + final View view = getView(viewName).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + + logger.info("Testing getView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> getView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testDeleteView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + logger.info("Testing deleteView with existing view"); + deleteView(viewName); + final Exception whenDeletedEx = assertThrows(ViewNotFoundException.class, () -> getView(viewName)); + MatcherAssert.assertThat(whenDeletedEx.getMessage(), is("View [" + viewName + "] does not exist")); + + logger.info("Testing deleteView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx 
= assertThrows(ViewNotFoundException.class, () -> deleteView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testUpdateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String originalIndexPattern = randomAlphaOfLength(8); + final View originalView = createView(viewName, originalIndexPattern).getView(); + + logger.info("Testing updateView with existing view"); + final String newDescription = randomAlphaOfLength(20); + final String newIndexPattern = "newPattern-" + originalIndexPattern; + final View updatedView = updateView(viewName, newDescription, newIndexPattern).getView(); + + MatcherAssert.assertThat(updatedView, not(is(originalView))); + MatcherAssert.assertThat(updatedView.getDescription(), is(newDescription)); + MatcherAssert.assertThat(updatedView.getTargets(), hasSize(1)); + MatcherAssert.assertThat(updatedView.getTargets().first().getIndexPattern(), is(newIndexPattern)); + + logger.info("Testing updateView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> updateView(nonExistentView, null, "index-*")); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testListViewNames() throws Exception { + logger.info("Testing listViewNames when no views have been created"); + MatcherAssert.assertThat(listViewNames(), is(List.of())); + + final String view1 = "view1"; + final String view2 = "view2"; + createView(view1, "index-1-*"); + createView(view2, "index-2-*"); + + logger.info("Testing listViewNames"); + final List<String> views = listViewNames(); + MatcherAssert.assertThat(views, containsInAnyOrder(view1, view2)); + + logger.info("Testing listViewNames after deleting a view"); + deleteView(view1); + final List<String> viewsAfterDeletion = listViewNames(); + MatcherAssert.assertThat(viewsAfterDeletion, not(contains(view1))); + MatcherAssert.assertThat(viewsAfterDeletion, contains(view2)); + } + + public void testSearchOperations() throws Exception { + final String indexInView1 = "index-1"; + final String indexInView2 = "index-2"; + final String indexNotInView = "another-index-1"; + + final int indexInView1DocCount = createIndexWithDocs(indexInView1); + final int indexInView2DocCount = createIndexWithDocs(indexInView2); + createIndexWithDocs(indexNotInView); + + logger.info("Testing view with no matches"); + createView("no-matches", "this-pattern-will-match-nothing"); + final Exception ex = assertThrows(IndexNotFoundException.class, () -> searchView("no-matches")); + MatcherAssert.assertThat(ex.getMessage(), is("no such index [this-pattern-will-match-nothing]")); + + logger.info("Testing view with exact index match"); + createView("only-index-1", "index-1"); + assertHitCount(searchView("only-index-1"), indexInView1DocCount); + + logger.info("Testing view with wildcard matches"); + createView("both-indices", "index-*"); + assertHitCount(searchView("both-indices"), indexInView1DocCount + indexInView2DocCount); + + logger.info("Testing searchView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> searchView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + 
nonExistentView + "] does not exist")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java new file mode 100644 index 0000000000000..a44ba0cf7c717 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public abstract class ViewTestBase extends OpenSearchIntegTestCase { + + protected int createIndexWithDocs(final String indexName) throws Exception { + createIndex(indexName); + ensureGreen(indexName); + + final int numOfDocs = scaledRandomIntBetween(0, 200); + try (final BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { + waitForDocs(numOfDocs, indexer); + } + + refresh(indexName); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); + return numOfDocs; + } + + protected GetViewAction.Response createView(final String name, final String indexPattern) throws Exception { + return createView(name, List.of(indexPattern)); + } + + protected GetViewAction.Response createView(final String name, final List<String> targets) throws Exception { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + null, + targets.stream().map(CreateViewAction.Request.Target::new).collect(Collectors.toList()) + ); + return client().admin().indices().createView(request).actionGet(); + } + + protected GetViewAction.Response getView(final String name) { + return client().admin().indices().getView(new GetViewAction.Request(name)).actionGet(); + + } + + protected void deleteView(final String name) { + client().admin().indices().deleteView(new DeleteViewAction.Request(name)).actionGet(); + performRemoteStoreTestAction(); + } + + protected List<String> listViewNames() { + return client().listViewNames(new ListViewNamesAction.Request()).actionGet().getViewNames(); + } + + protected SearchResponse searchView(final String viewName) throws Exception { + final SearchViewAction.Request request = new SearchViewAction.Request(viewName, new SearchRequest()); + final SearchResponse response = client().searchView(request).actionGet(); + return response; + } + + protected GetViewAction.Response updateView(final String name, final String description, final String indexPattern) { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + description, + List.of(new CreateViewAction.Request.Target(indexPattern)) + ); + final GetViewAction.Response response = client().admin().indices().updateView(request).actionGet(); + return response; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java index 7bd1467933e00..280f574b1baf9 100644 
--- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java @@ -74,8 +74,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { - public GetTermVectorsIT(Settings dynamicSettings) { - super(dynamicSettings); + public GetTermVectorsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java index 7c6c47c682281..3fc3235701f17 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java @@ -52,8 +52,8 @@ import static org.hamcrest.Matchers.nullValue; public class MultiTermVectorsIT extends AbstractTermVectorsTestCase { - public MultiTermVectorsIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiTermVectorsIT(Settings staticSettings) { + super(staticSettings); } public void testDuelESLucene() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index f0337e9c0c84c..af5900b1cba6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -97,12 +97,17 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(PrivateCustomPlugin.class); } + @Override + protected boolean useRandomReplicationStrategy() { + return true; + } + @Before public void indexData() throws Exception { index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject()); index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject()); index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject()); - refresh(); + refreshAndWaitForReplication(); } public void testRoutingTable() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 2e0dd579d6910..d6d22c95ee5a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -13,12 +13,14 @@ import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalTestCluster; 
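+// Note: ClusterStateRequest and InternalTestCluster are imported for the restart test added
+// below, which restarts individual nodes and then reads each node's local cluster state via
+// new ClusterStateRequest().local(true) to verify the weighted routing metadata survived.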
import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; @@ -715,4 +717,144 @@ public void testClusterHealthResponseWithEnsureNodeWeighedInParam() throws Excep assertFalse(nodeLocalHealth.isTimedOut()); assertTrue(nodeLocalHealth.hasDiscoveredClusterManager()); } + + public void testReadWriteWeightedRoutingMetadataOnNodeRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startClusterManagerOnlyNode(Settings.builder().put(commonSettings).build()); + + logger.info("--> starting 1 nodes on zones 'a' & 'b' & 'c'"); + List<String> nodes_in_zone_a = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List<String> nodes_in_zone_b = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List<String> nodes_in_zone_c = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map<String, Double> weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .setVersion(-1) + .get(); + assertEquals(response.isAcknowledged(), true); + + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().setVersion(0).get(); + assertTrue(deleteResponse.isAcknowledged()); + + // check weighted routing metadata after node restart, ensure node comes healthy after restart + internalCluster().restartNode(nodes_in_zone_a.get(0), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + + 
internalCluster().restartNode(internalCluster().getClusterManagerName(), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 089a91a30dd17..cc8747e5f5666 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -245,8 +245,10 @@ public void testIndexCreateBlockIsRemovedWhenAnyNodesNotExceedHighWatermarkWithA (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, TOTAL_SPACE_BYTES, TOTAL_SPACE_BYTES) ); - // Validate if index create block is removed on the cluster + // Validate if index create block is removed on the cluster. Need to refresh this periodically as well to remove + // the node from high watermark breached list. 
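+ // Calling clusterInfoService.refresh() on each assertBusy retry recomputes the disk-usage
+ // stats; without it the stale high-watermark snapshot could keep the create-index block in
+ // place and the assertion would time out.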
assertBusy(() -> { + clusterInfoService.refresh(); ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).get().getState(); assertFalse(state1.blocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id())); }, 30L, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index fb97ae59aae91..5eef7074e1dd6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -245,23 +245,22 @@ public void testIndexCreationOverLimitForDotIndexesFail() { assertFalse(clusterState.getMetadata().hasIndex(".test-index")); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6287") public void testCreateIndexWithMaxClusterShardSetting() { - int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - setMaxShardLimit(dataNodes, shardsPerNodeKey); + int maxAllowedShardsPerNode = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + setMaxShardLimit(maxAllowedShardsPerNode, shardsPerNodeKey); - int maxAllowedShards = dataNodes + 1; - int extraShardCount = maxAllowedShards + 1; + // Always keep + int maxAllowedShardsPerCluster = maxAllowedShardsPerNode * 1000; + int extraShardCount = 1; // Getting total active shards in the cluster. int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); try { - setMaxShardLimit(maxAllowedShards, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); + setMaxShardLimit(maxAllowedShardsPerCluster, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); prepareCreate("test_index_with_cluster_shard_limit").setSettings( Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, extraShardCount).put(SETTING_NUMBER_OF_REPLICAS, 0).build() ).get(); } catch (final IllegalArgumentException ex) { - verifyException(Math.min(maxAllowedShards, dataNodes * dataNodes), currentActiveShards, extraShardCount, ex); + verifyException(maxAllowedShardsPerCluster, currentActiveShards, extraShardCount, ex); } finally { setMaxShardLimit(-1, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java new file mode 100644 index 0000000000000..2b6a5b4ee6867 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway; + +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.store.ShardAttributes; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.OpenSearchIntegTestCase.client; +import static org.opensearch.test.OpenSearchIntegTestCase.internalCluster; +import static org.opensearch.test.OpenSearchIntegTestCase.resolveIndex; + +public class GatewayRecoveryTestUtils { + + public static DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List<DiscoveryNode> nodes = new LinkedList<>(clusterStateResponse.getState().nodes().getDataNodes().values()); + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public static Map<ShardId, ShardAttributes> prepareRequestMap(String[] indices, int primaryShardCount) { + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = new HashMap<>(); + for (String indexName : indices) { + final Index index = resolveIndex(indexName); + final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get( + client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName) + ); + for (int shardIdNum = 0; shardIdNum < primaryShardCount; shardIdNum++) { + final ShardId shardId = new ShardId(index, shardIdNum); + shardIdShardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); + } + } + return shardIdShardAttributesMap; + } + + public static void corruptShard(String nodeName, ShardId shardId) throws IOException, InterruptedException { + for (Path path : internalCluster().getInstance(NodeEnvironment.class, nodeName).availableShardPaths(shardId)) { + final Path indexPath = path.resolve(ShardPath.INDEX_FOLDER_NAME); + if (Files.exists(indexPath)) { // multi data path might only have one path in use + try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) { + for (Path item : stream) { + if (item.getFileName().toString().startsWith("segments_")) { + Files.delete(item); + } + } + } + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 229cd7bffad2f..ba03532a9aa2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -32,10 +32,14 @@ package org.opensearch.gateway; +import org.opensearch.Version; import 
org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.stats.IndexStats; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -44,6 +48,7 @@ import org.opensearch.cluster.coordination.ElectionSchedulerFactory; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -51,6 +56,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.MergePolicyProvider; @@ -60,6 +66,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster.RestartCallback; @@ -79,12 +88,18 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.prepareRequestMap; import static org.opensearch.gateway.GatewayService.RECOVER_AFTER_NODES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; @@ -706,11 +721,11 @@ public Settings onNodeStopped(String nodeName) throws Exception { ); assertThat(response.getNodes(), hasSize(1)); - assertThat(response.getNodes().get(0).allocationId(), notNullValue()); + assertThat(response.getNodes().get(0).getGatewayShardStarted().allocationId(), notNullValue()); if (corrupt) { - 
assertThat(response.getNodes().get(0).storeException(), notNullValue()); + assertThat(response.getNodes().get(0).getGatewayShardStarted().storeException(), notNullValue()); } else { - assertThat(response.getNodes().get(0).storeException(), nullValue()); + assertThat(response.getNodes().get(0).getGatewayShardStarted().storeException(), nullValue()); } // start another node so cluster consistency checks won't time out due to the lack of state @@ -734,4 +749,217 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { internalCluster().fullRestart(); ensureGreen("test"); } + + public void testSingleShardFetchUsingBatchAction() { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + GatewayStartedShard gatewayStartedShard = response.getNodesMap() + .get(searchShardsResponse.getNodes()[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNodeGatewayStartedShardsHappyCase(gatewayStartedShard); + } + + public void testShardFetchMultiNodeMultiIndexesUsingBatchAction() { + // start node + internalCluster().startNode(); + String indexName1 = "test1"; + String indexName2 = "test2"; + int numShards = internalCluster().numDataNodes(); + // assign one primary shard each to the data nodes + prepareIndex(indexName1, numShards); + prepareIndex(indexName2, numShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName1, indexName2 }, numShards); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); + assertEquals(internalCluster().numDataNodes(), searchShardsResponse.getNodes().length); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + assertEquals(1, clusterSearchShardsGroup.getShards().length); + String nodeId = clusterSearchShardsGroup.getShards()[0].currentNodeId(); + GatewayStartedShard gatewayStartedShard = response.getNodesMap().get(nodeId).getNodeGatewayStartedShardsBatch().get(shardId); + assertNodeGatewayStartedShardsHappyCase(gatewayStartedShard); + } + } + + public void testShardFetchCorruptedShardsUsingBatchAction() throws Exception { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + ClusterSearchShardsResponse 
searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + corruptShard(searchShardsResponse.getNodes()[0].getName(), shardId); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + internalCluster().restartNode(searchShardsResponse.getNodes()[0].getName()); + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(getDiscoveryNodes(), shardIdShardAttributesMap) + ); + DiscoveryNode[] discoveryNodes = getDiscoveryNodes(); + GatewayStartedShard gatewayStartedShard = response.getNodesMap() + .get(discoveryNodes[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNotNull(gatewayStartedShard.storeException()); + assertNotNull(gatewayStartedShard.allocationId()); + assertTrue(gatewayStartedShard.primary()); + } + + public void testSingleShardStoreFetchUsingBatchAction() throws ExecutionException, InterruptedException { + String indexName = "test"; + DiscoveryNode[] nodes = getDiscoveryNodes(); + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName }, + nodes + ); + Index index = resolveIndex(indexName); + ShardId shardId = new ShardId(index, 0); + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata = response.getNodesMap() + .get(nodes[0].getId()) + .getNodeStoreFilesMetadataBatch() + .get(shardId); + assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata, shardId); + } + + public void testShardStoreFetchMultiNodeMultiIndexesUsingBatchAction() throws Exception { + internalCluster().startNodes(2); + String indexName1 = "test1"; + String indexName2 = "test2"; + DiscoveryNode[] nodes = getDiscoveryNodes(); + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName1, indexName2 }, + nodes + ); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + ShardRouting[] shardRoutings = clusterSearchShardsGroup.getShards(); + assertEquals(2, shardRoutings.length); + for (ShardRouting shardRouting : shardRoutings) { + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata = response.getNodesMap() + .get(shardRouting.currentNodeId()) + .getNodeStoreFilesMetadataBatch() + .get(shardId); + assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata, shardId); + } + } + } + + public void testShardStoreFetchNodeNotConnectedUsingBatchAction() { + DiscoveryNode nonExistingNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + String indexName = "test"; + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName }, + new DiscoveryNode[] { nonExistingNode } + ); + assertTrue(response.hasFailures()); + assertEquals(1, response.failures().size()); + assertEquals(nonExistingNode.getId(), response.failures().get(0).nodeId()); + } + + public void testShardStoreFetchCorruptedIndexUsingBatchAction() throws Exception { + 
internalCluster().startNodes(2);
+        String index1Name = "test1";
+        String index2Name = "test2";
+        prepareIndices(new String[] { index1Name, index2Name }, 1, 1);
+        Map<ShardId, ShardAttributes> shardAttributesMap = prepareRequestMap(new String[] { index1Name, index2Name }, 1);
+        Index index1 = resolveIndex(index1Name);
+        ShardId shardId1 = new ShardId(index1, 0);
+        ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(index1Name).get();
+        assertEquals(2, searchShardsResponse.getNodes().length);
+
+        // corrupt test1 index shards
+        corruptShard(searchShardsResponse.getNodes()[0].getName(), shardId1);
+        corruptShard(searchShardsResponse.getNodes()[1].getName(), shardId1);
+        ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(false).get();
+        DiscoveryNode[] discoveryNodes = getDiscoveryNodes();
+        TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response;
+        response = ActionTestUtils.executeBlocking(
+            internalCluster().getInstance(TransportNodesListShardStoreMetadataBatch.class),
+            new TransportNodesListShardStoreMetadataBatch.Request(shardAttributesMap, discoveryNodes)
+        );
+        Map<ShardId, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> nodeStoreFilesMetadata = response.getNodesMap()
+            .get(discoveryNodes[0].getId())
+            .getNodeStoreFilesMetadataBatch();
+        // We don't store an exception for a corrupt index; we just return an empty response.
+        assertNull(nodeStoreFilesMetadata.get(shardId1).getStoreFileFetchException());
+        assertEquals(shardId1, nodeStoreFilesMetadata.get(shardId1).storeFilesMetadata().shardId());
+        assertTrue(nodeStoreFilesMetadata.get(shardId1).storeFilesMetadata().isEmpty());
+
+        Index index2 = resolveIndex(index2Name);
+        ShardId shardId2 = new ShardId(index2, 0);
+        assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata.get(shardId2), shardId2);
+    }
+
+    private void prepareIndices(String[] indices, int numberOfPrimaryShards, int numberOfReplicaShards) {
+        for (String index : indices) {
+            createIndex(
+                index,
+                Settings.builder()
+                    .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards)
+                    .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicaShards)
+                    .build()
+            );
+            index(index, "type", "1", Collections.emptyMap());
+            flush(index);
+        }
+    }
+
+    private TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch prepareAndSendRequest(
+        String[] indices,
+        DiscoveryNode[] nodes
+    ) {
+        prepareIndices(indices, 1, 1);
+        Map<ShardId, ShardAttributes> shardAttributesMap = prepareRequestMap(indices, 1);
+        return ActionTestUtils.executeBlocking(
+            internalCluster().getInstance(TransportNodesListShardStoreMetadataBatch.class),
+            new TransportNodesListShardStoreMetadataBatch.Request(shardAttributesMap, nodes)
+        );
+    }
+
+    private void assertNodeStoreFilesMetadataSuccessCase(
+        TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata,
+        ShardId shardId
+    ) {
+        assertNull(nodeStoreFilesMetadata.getStoreFileFetchException());
+        TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata storeFileMetadata = nodeStoreFilesMetadata.storeFilesMetadata();
+        assertFalse(storeFileMetadata.isEmpty());
+        assertEquals(shardId, storeFileMetadata.shardId());
+        assertNotNull(storeFileMetadata.peerRecoveryRetentionLeases());
+    }
+
+    private void assertNodeGatewayStartedShardsHappyCase(GatewayStartedShard
gatewayStartedShard) { + assertNull(gatewayStartedShard.storeException()); + assertNotNull(gatewayStartedShard.allocationId()); + assertTrue(gatewayStartedShard.primary()); + } + + private void prepareIndex(String indexName, int numberOfPrimaryShards) { + createIndex( + indexName, + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + index(indexName, "type", "1", Collections.emptyMap()); + flush(indexName); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java index bb6e356db188f..369c9f9b1a653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java @@ -39,9 +39,8 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -51,11 +50,11 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; -public class IndexSortIT extends ParameterizedOpenSearchIntegTestCase { +public class IndexSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final XContentBuilder TEST_MAPPING = createTestMapping(); - public IndexSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public IndexSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static XContentBuilder createTestMapping() { try { return jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java index 2d28578dbebcc..df423edeca9c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java @@ -38,10 +38,9 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -55,12 +54,12 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class MatchPhraseQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class MatchPhraseQueryIT extends 
ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX = "test"; - public MatchPhraseQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public MatchPhraseQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setUp() throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java index 6332b1b97426f..a1ff2da249d69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java @@ -44,13 +44,12 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -68,10 +67,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SuggestStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class SuggestStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SuggestStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SuggestStatsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -82,11 +81,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java index 73d6d9aff7b72..8cb54631b593f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java @@ -32,19 +32,23 @@ package org.opensearch.indexing; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.common.settings.Settings; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperParsingException; import 
org.opensearch.indices.InvalidIndexNameException; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Random; @@ -57,7 +61,17 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class IndexActionIT extends OpenSearchIntegTestCase { +public class IndexActionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IndexActionIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + /** * This test tries to simulate load while creating an index and indexing documents * while the index is being created. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 51dba07a8f9f8..52b4dad553180 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -49,7 +49,7 @@ import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.time.ZoneId; @@ -69,7 +69,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class IndicesRequestCacheIT extends ParameterizedOpenSearchIntegTestCase { +public class IndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public IndicesRequestCacheIT(Settings settings) { super(settings); } @@ -78,13 +78,15 @@ public IndicesRequestCacheIT(Settings settings) { public static Collection<Object[]> parameters() { return Arrays.asList( new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }, + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() } ); } @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + protected boolean useRandomReplicationStrategy() { + return true; } // One of the primary purposes of the query cache is to cache aggs results @@ -186,7 +188,7 @@ public void testQueryRewrite() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - 
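// A minimal sketch (not from this patch) of how the parameterized suites above expand: each
// Object[] row returned by the @ParametersFactory method instantiates the whole test class once
// with those static Settings, so IndicesRequestCacheIT now runs under four configurations. The
// setting keys below are abbreviated placeholders, not the real Settings constants:
final class ParameterMatrixSketch {
    public static void main(String[] args) {
        java.util.List<java.util.Map<String, String>> rows = java.util.List.of(
            java.util.Map.of("concurrent_segment_search", "false"),
            java.util.Map.of("concurrent_segment_search", "true"),
            java.util.Map.of("pluggable_cache", "true"),
            java.util.Map.of("pluggable_cache", "false")
        );
        for (java.util.Map<String, String> row : rows) {
            // The randomized-testing runner does the equivalent of: new IndicesRequestCacheIT(settingsFor(row))
            System.out.println("instantiate suite with " + row);
        }
    }
}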
refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -256,7 +258,7 @@ public void testQueryRewriteMissingValues() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -322,7 +324,7 @@ public void testQueryRewriteDates() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -395,7 +397,7 @@ public void testQueryRewriteDatesWithNow() throws Exception { .setFlush(true) .get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index-1", "index-2", "index-3"); assertCacheState(client, "index-1", 0, 0); @@ -466,7 +468,7 @@ public void testCanCache() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -560,7 +562,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); indexRandomForConcurrentSearch("index"); @@ -667,7 +669,7 @@ public void testCacheWithInvalidation() throws Exception { assertCacheState(client, "index", 1, 1); // Explicit refresh would invalidate cache - refresh(); + refreshAndWaitForReplication(); // Hit same query again resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); assertSearchResponse(resp); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 0c5780210901d..73e888eea362c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -50,7 +50,6 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import 
org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.breaker.NoopCircuitBreaker; @@ -63,7 +62,7 @@ import org.opensearch.search.SearchService; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -94,9 +93,9 @@ * Integration tests for InternalCircuitBreakerService */ @ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1) -public class CircuitBreakerServiceIT extends ParameterizedOpenSearchIntegTestCase { - public CircuitBreakerServiceIT(Settings dynamicSettings) { - super(dynamicSettings); +public class CircuitBreakerServiceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public CircuitBreakerServiceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -107,11 +106,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index c049c8ed2d4a6..9decd17d95eab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -66,19 +66,16 @@ public void testPrimaryRelocationWhileIndexing() throws Exception { ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); - numAutoGenDocs.incrementAndGet(); - } + Thread indexingThread = new Thread(() -> { + while (finished.get() == false && numAutoGenDocs.get() < 10_000) { + IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex("test").setSource("auto", true).get(); + numAutoGenDocs.incrementAndGet(); } - }; + }); indexingThread.start(); ClusterState initialState = client().admin().cluster().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java 
index e4f1f8717f899..8ce87f37d77cd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -298,7 +298,6 @@ public void testGatewayRecovery() throws Exception { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT)); - assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); @@ -523,12 +522,12 @@ public void testRerouteRecovery() throws Exception { logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); - assertBusy(() -> { + assertBusyWithFixedSleepTime(() -> { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); - }); + }, TimeValue.timeValueSeconds(10), TimeValue.timeValueMillis(500)); logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index f485d4e402b41..30edea6551067 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -22,6 +22,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.List; @@ -128,6 +129,7 @@ public void testPerIndexPrimaryAllocation() throws Exception { * ensures the primary shard distribution is balanced. 
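// A minimal sketch (not from this patch) of the polling loop implied by assertBusyWithFixedSleepTime,
// used in the recovery test above: retry a failing assertion at a fixed interval (here 500 ms with a
// 10 s budget) instead of assertBusy's growing backoff. JDK types only:
final class FixedSleepPollSketch {
    static void awaitAssertion(Runnable assertion, java.time.Duration timeout, java.time.Duration sleep) throws InterruptedException {
        final long deadline = System.nanoTime() + timeout.toNanos();
        while (true) {
            try {
                assertion.run(); // throws AssertionError until the condition holds
                return;
            } catch (AssertionError e) {
                if (System.nanoTime() >= deadline) {
                    throw e; // surface the last failure once the time budget is spent
                }
                Thread.sleep(sleep.toMillis()); // fixed pause, no backoff
            }
        }
    }
}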
* */ + @TestLogging(reason = "Enable debug logs from cluster and index replication package", value = "org.opensearch.cluster:DEBUG,org.opensearch.indices.replication:DEBUG") public void testSingleIndexShardAllocation() throws Exception { internalCluster().startClusterManagerOnlyNode(); final int maxReplicaCount = 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 641f714d33414..be849452c0f5e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.index.SegmentInfos; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -16,10 +17,11 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationShardStats; @@ -28,12 +30,14 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -175,17 +179,6 @@ private IndexShard getIndexShard(ClusterState state, ShardRouting routing, Strin return getIndexShard(state.nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); } - /** - * Fetch IndexShard by shardId, multiple shards per node allowed. - */ - protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { - final Index index = resolveIndex(indexName); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); - final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid == shardId.id()).findFirst(); - return indexService.getShard(id.get()); - } - /** * Fetch IndexShard, assumes only a single shard per node. */ @@ -242,4 +235,14 @@ protected void assertReplicaCheckpointUpdated(IndexShard primaryShard) throws Ex } }, 30, TimeUnit.SECONDS); } + + /** + * Returns the latest SIS for a shard but does not incref the segments. 
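// A minimal sketch (not from this patch) of the GatedCloseable pattern the helper below relies on:
// the gate keeps the underlying resource alive while open, and once closed the returned value may
// point at files that can be deleted, which is why callers copy what they need (the scroll test
// copies files(false) into its own Set). GatedSketch is a JDK-only stand-in, not the real class.
final class GatedSketch<T> implements AutoCloseable {
    private final T value;
    private final Runnable releaseGate; // e.g. a decRef on the underlying store

    GatedSketch(T value, Runnable releaseGate) {
        this.value = value;
        this.releaseGate = releaseGate;
    }

    T get() {
        return value;
    }

    @Override
    public void close() {
        releaseGate.run(); // after this, 'value' is no longer protected from deletion
    }
}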
+ */ + protected SegmentInfos getLatestSegmentInfos(IndexShard shard) throws IOException { + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = shard.getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> closeable = tuple.v1()) { + return closeable.get(); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 5511bc7945d65..70da3b0e38472 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -25,6 +25,7 @@ import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.get.GetResponse; @@ -53,12 +54,11 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.common.action.ActionFuture; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; @@ -73,7 +73,6 @@ import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.FileChunkRequest; -import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; import org.opensearch.search.SearchService; @@ -92,6 +91,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -400,6 +401,14 @@ public void testMultipleShards() throws Exception { } public void testReplicationAfterForceMerge() throws Exception { + performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT)); + } + + public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception { + performReplicationAfterForceMerge(true, SHARD_COUNT); + } + + private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); @@ -430,8 +439,16 @@ public void testReplicationAfterForceMerge() throws Exception { waitForDocs(expectedHitCount, indexer); waitForSearchableDocs(expectedHitCount, nodeA, nodeB); - // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. 
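// A minimal sketch (not from this patch) of the shard arithmetic encoded by the two callers of
// performReplicationAfterForceMerge above: with primaryOnly=false every copy force-merges, while
// primaryOnly=true merges only the primaries. SHARD_COUNT/REPLICA_COUNT map to the parameters here.
final class ForceMergeShardMathSketch {
    static int expectedSuccessfulShards(int shardCount, int replicaCount, boolean primaryOnly) {
        // all copies = primaries plus their replicas; primary-only skips the replicas entirely
        return primaryOnly ? shardCount : shardCount * (1 + replicaCount);
    }
}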
- client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get();
+        // Perform force merge only on the primary shards.
+        final ForceMergeResponse forceMergeResponse = client().admin()
+            .indices()
+            .prepareForceMerge(INDEX_NAME)
+            .setPrimaryOnly(primaryOnly)
+            .setMaxNumSegments(1)
+            .setFlush(false)
+            .get();
+        assertThat(forceMergeResponse.getFailedShards(), is(0));
+        assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards));
         refresh(INDEX_NAME);
         verifyStoreContent();
     }
@@ -594,6 +611,67 @@ public void testCancellation() throws Exception {
         assertDocCounts(docCount, primaryNode);
     }
+
+    public void testCancellationDuringGetCheckpointInfo() throws Exception {
+        cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO);
+    }
+
+    public void testCancellationDuringGetSegments() throws Exception {
+        cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES);
+    }
+
+    private void cancelDuringReplicaAction(String actionToBlock) throws Exception {
+        // This test stubs transport calls specific to node-node replication.
+        assumeFalse(
+            "Skipping the test as it's not compatible with segment replication with remote store.",
+            segmentReplicationWithRemoteEnabled()
+        );
+        final String primaryNode = internalCluster().startDataOnlyNode();
+        createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build());
+        ensureYellow(INDEX_NAME);
+
+        final String replicaNode = internalCluster().startDataOnlyNode();
+        ensureGreen(INDEX_NAME);
+        final SegmentReplicationTargetService targetService = internalCluster().getInstance(
+            SegmentReplicationTargetService.class,
+            replicaNode
+        );
+        final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME);
+        CountDownLatch startCancellationLatch = new CountDownLatch(1);
+        CountDownLatch latch = new CountDownLatch(1);
+
+        MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance(
+            TransportService.class,
+            primaryNode
+        );
+        primaryTransportService.addRequestHandlingBehavior(actionToBlock, (handler, request, channel, task) -> {
+            logger.info("action {}", actionToBlock);
+            try {
+                startCancellationLatch.countDown();
+                latch.await();
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        });
+
+        // index a doc and trigger replication
+        client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
+
+        // remove the replica and ensure it is cleaned up.
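// A minimal sketch (not from this patch) of the two-latch handshake the cancellation test uses: the
// stubbed transport handler signals that it is parked, the test asserts against the in-flight state,
// then releases the handler so shutdown can finish. JDK types only:
final class LatchHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        java.util.concurrent.CountDownLatch started = new java.util.concurrent.CountDownLatch(1);
        java.util.concurrent.CountDownLatch release = new java.util.concurrent.CountDownLatch(1);
        Thread handler = new Thread(() -> {
            started.countDown(); // tell the test the action is now blocked
            try {
                release.await(); // park until the test finishes asserting
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        handler.start();
        started.await(); // proceed only once the action is provably in flight
        // ... assertions against the half-finished replication would go here ...
        release.countDown(); // unblock the handler
        handler.join();
    }
}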
+ startCancellationLatch.await(); + SegmentReplicationTarget target = targetService.get(replicaShard.shardId()); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + assertEquals("Replication not closed: " + target.getId(), 0, target.refCount()); + assertEquals("Store has a positive refCount", 0, replicaShard.store().refCount()); + // stop the replica, this will do additional checks on shutDown to ensure the replica and its store are closed properly + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + latch.countDown(); + } + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); @@ -992,32 +1070,31 @@ private void assertAllocationIdsInReplicaShardStats(Set<String> expected, Set<Se public void testScrollCreatedOnReplica() throws Exception { // create the cluster with one primary node containing primary shard and replica node containing replica shard final String primary = internalCluster().startDataOnlyNode(); - createIndex(INDEX_NAME); + prepareCreate( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + // we want to control refreshes + .put("index.refresh_interval", -1) + ).get(); ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - // index 10 docs - for (int i = 0; i < 10; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - } + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(0)) + .setSource(jsonBuilder().startObject().field("field", 0).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + assertBusy( () -> assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ) ); - final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); - final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = replicaShard.getLatestSegmentInfosAndCheckpoint(); - final Collection<String> snapshottedSegments; - try (final GatedCloseable<SegmentInfos> closeable = tuple.v1()) { - snapshottedSegments = closeable.get().files(false); - } + // opens a scrolled query before a flush is called. 
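// A minimal sketch (not from this patch) of the invariant the scroll test verifies: segment files
// referenced by an open scroll must survive refreshes, merges, and commits until the scroll is
// cleared, after which they become deletable. A toy reference-count model, JDK types only:
final class ScrollPinSketch {
    private final java.util.Map<String, Integer> refs = new java.util.HashMap<>();

    void pin(java.util.Set<String> files) { // opening a scroll snapshots the current segment files
        files.forEach(f -> refs.merge(f, 1, Integer::sum));
    }

    void unpin(java.util.Set<String> files) { // clearing the scroll releases that snapshot
        files.forEach(f -> refs.computeIfPresent(f, (k, v) -> v > 1 ? v - 1 : null));
    }

    boolean deletable(String file) { // a merged-away file may be removed only once unpinned
        return refs.containsKey(file) == false;
    }
}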
// this is for testing scroll segment consistency between refresh and flush SearchResponse searchResponse = client(replica).prepareSearch() @@ -1031,17 +1108,20 @@ public void testScrollCreatedOnReplica() throws Exception { .setScroll(TimeValue.timeValueDays(1)) .get(); - // force call flush - flush(INDEX_NAME); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + SegmentInfos latestSegmentInfos = getLatestSegmentInfos(replicaShard); + final Set<String> snapshottedSegments = new HashSet<>(latestSegmentInfos.files(false)); + logger.info("Segments {}", snapshottedSegments); - for (int i = 3; i < 5; i++) { - client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + // index more docs and force merge down to 1 segment + for (int i = 1; i < 5; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); refresh(INDEX_NAME); - if (randomBoolean()) { - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); - flush(INDEX_NAME); - } } + // create new on-disk segments and copy them out. assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), @@ -1049,13 +1129,19 @@ public void testScrollCreatedOnReplica() throws Exception { ); }); + // force merge and flush. client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + // wait for replication to complete assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ); }); + logger.info("Local segments after force merge and commit {}", getLatestSegmentInfos(replicaShard).files(false)); + List<String> filesBeforeClearScroll = List.of(replicaShard.store().directory().listAll()); + assertTrue("Files should be preserved", filesBeforeClearScroll.containsAll(snapshottedSegments)); + // Test stats logger.info("--> Collect all scroll query hits"); long scrollHits = 0; @@ -1064,20 +1150,23 @@ public void testScrollCreatedOnReplica() throws Exception { searchResponse = client(replica).prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueDays(1)).get(); assertAllSuccessful(searchResponse); } while (searchResponse.getHits().getHits().length > 0); - - List<String> currentFiles = List.of(replicaShard.store().directory().listAll()); - assertTrue("Files should be preserved", currentFiles.containsAll(snapshottedSegments)); + assertEquals(1, scrollHits); client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - - assertBusy( - () -> assertFalse( - "Files should be cleaned up post scroll clear request", - List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) - ) + final Set<String> filesAfterClearScroll = Arrays.stream(replicaShard.store().directory().listAll()).collect(Collectors.toSet()); + // there should be no active readers, snapshots, or on-disk commits containing the snapshotted files, check that they have been + // deleted. 
+ Set<String> latestCommitSegments = new HashSet<>(replicaShard.store().readLastCommittedSegmentsInfo().files(false)); + assertEquals( + "Snapshotted files are no longer part of the latest commit", + Collections.emptySet(), + Sets.intersection(latestCommitSegments, snapshottedSegments) + ); + assertEquals( + "All snapshotted files should be deleted", + Collections.emptySet(), + Sets.intersection(filesAfterClearScroll, snapshottedSegments) ); - assertEquals(10, scrollHits); - } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index dbe0b43441f54..a7be63bc61bc2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -219,6 +219,7 @@ public void testPrimaryRelocationWithSegRepFailure() throws Exception { * This test verifies primary recovery behavior with continuous ingestion * */ + @TestLogging(reason = "Enable trace logs from replication and recovery package", value = "org.opensearch.indices.recovery:TRACE,org.opensearch.indices.replication:TRACE") public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { final String primary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java index fb06a97bd51c2..69411b2ff640a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java @@ -87,7 +87,8 @@ public void testCreateShrinkIndexThrowsExceptionWhenReplicasBehind() throws Exce .get() ); assertEquals( - " For index [test] replica shards haven't caught up with primary, please retry after sometime.", + "Replication still in progress for index [test]. Please wait for replication to complete and retry. " + + "Use the _cat/segment_replication/test api to check if the index is up to date (e.g. 
bytes_behind == 0).", exception.getMessage() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 766471fdc0756..89aef6f0be1a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -268,12 +268,12 @@ public void testMultipleIndices() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String index_2 = "tst-index-2"; List<String> nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); nodes.add(primaryNode); createIndex(INDEX_NAME, index_2); ensureYellowAndNoInitializingShards(INDEX_NAME, index_2); - nodes.add(internalCluster().startNode()); + nodes.add(internalCluster().startDataOnlyNode()); ensureGreen(INDEX_NAME, index_2); final long numDocs = scaledRandomIntBetween(50, 100); @@ -284,6 +284,7 @@ public void testMultipleIndices() throws Exception { refresh(INDEX_NAME, index_2); waitForSearchableDocs(INDEX_NAME, numDocs, nodes); waitForSearchableDocs(index_2, numDocs, nodes); + ensureSearchable(INDEX_NAME, index_2); final IndexShard index_1_primary = getIndexShard(primaryNode, INDEX_NAME); final IndexShard index_2_primary = getIndexShard(primaryNode, index_2); @@ -291,37 +292,39 @@ public void testMultipleIndices() throws Exception { assertTrue(index_1_primary.routingEntry().primary()); assertTrue(index_2_primary.routingEntry().primary()); - // test both indices are returned in the response. - SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin() - .indices() - .prepareSegmentReplicationStats() - .execute() - .actionGet(); + assertBusy(() -> { + // test both indices are returned in the response. 
+ SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats() + .execute() + .actionGet(); - Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); - assertEquals(2, replicationStats.size()); - List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); - assertEquals(1, replicationPerGroupStats.size()); - SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); - Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); + assertEquals(2, replicationStats.size()); + List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); + assertEquals(1, replicationPerGroupStats.size()); + SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); + Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } - replicationPerGroupStats = replicationStats.get(index_2); - assertEquals(1, replicationPerGroupStats.size()); - perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); - replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + replicationPerGroupStats = replicationStats.get(index_2); + assertEquals(1, replicationPerGroupStats.size()); + perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); + replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } + }, 30, TimeUnit.SECONDS); // test only single index queried. 
- segmentReplicationStatsResponse = client().admin() + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() .indices() .prepareSegmentReplicationStats() .setIndices(index_2) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index 547f9e7a8d380..87e5df8c48981 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -287,7 +287,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - assertAcked(client().admin().indices().prepareDelete(indexToDelete)); + assertAcked(client().admin().indices().prepareDelete(indexToDelete).setTimeout("60s")); } catch (final Exception e) { assertException(e, indexToDelete); } @@ -301,7 +301,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - client().admin().indices().prepareClose(indexToClose).get(); + client().admin().indices().prepareClose(indexToClose).setTimeout("60s").get(); } catch (final Exception e) { assertException(e, indexToClose); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 9c96d4861d426..1d5da9370cce3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -57,7 +57,6 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; @@ -85,7 +84,7 @@ import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -108,6 +107,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; @@ -122,7 +122,7 @@ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0) @SuppressCodecs("*") // requires custom completion format -public class IndexStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class IndexStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public IndexStatsIT(Settings settings) { super(settings); } @@ -131,15 +131,11 @@ public IndexStatsIT(Settings settings) { public static Collection<Object[]> parameters() { return Arrays.asList( new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
-            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() },
+            new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() }
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(InternalSettingsPlugin.class);
@@ -181,7 +177,7 @@ public void testFieldDataStats() throws InterruptedException {
         ensureGreen();
         client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet();
         client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet();
-        client().admin().indices().prepareRefresh().execute().actionGet();
+        refreshAndWaitForReplication();
         indexRandomForConcurrentSearch("test");
         NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
@@ -305,7 +301,7 @@ public void testClearAllCaches() throws Exception {
         client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
         client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet();
         client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet();
-        client().admin().indices().prepareRefresh().execute().actionGet();
+        refreshAndWaitForReplication();
         indexRandomForConcurrentSearch("test");
         NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
@@ -673,7 +669,7 @@ public void testSimpleStats() throws Exception {
         client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet();
         client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet();
         client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet();
-        refresh();
+        refreshAndWaitForReplication();
         NumShards test1 = getNumShards("test1");
         long test1ExpectedWrites = 2 * test1.dataCopies;
@@ -688,7 +684,13 @@ public void testSimpleStats() throws Exception {
         assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(0L));
         assertThat(stats.getPrimaries().getIndexing().getTotal().isThrottled(), equalTo(false));
         assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L));
-        assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites));
+
+        // Skip this assertion for segrep-enabled indices: it asserts the Indexing/Write operation count across all
+        // primary and replica shards, but with segment replication those operations don't happen on replica shards.
+ if (isSegmentReplicationEnabledForIndex("test1") == false && isSegmentReplicationEnabledForIndex("test2") == false) { + assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites)); + } assertThat(stats.getTotal().getStore(), notNullValue()); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getFlush(), notNullValue()); @@ -831,6 +833,7 @@ public void testMergeStats() { client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); stats = client().admin().indices().prepareStats().setMerge(true).execute().actionGet(); + refreshAndWaitForReplication(); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0L)); } @@ -857,7 +860,7 @@ public void testSegmentsStats() { client().admin().indices().prepareFlush().get(); client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); - client().admin().indices().prepareRefresh().get(); + refreshAndWaitForReplication(); stats = client().admin().indices().prepareStats().setSegments(true).get(); assertThat(stats.getTotal().getSegments(), notNullValue()); @@ -875,7 +878,7 @@ public void testAllFlags() throws Exception { client().prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); Flag[] values = CommonStatsFlags.Flag.values(); for (Flag flag : values) { @@ -1459,6 +1462,7 @@ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() { .get() .status() ); + refreshAndWaitForReplication(); ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).setTranslog(true).get().getShards()[0]; RemoteSegmentStats remoteSegmentStatsFromIndexStats = shard.getStats().getSegments().getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromIndexStats); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java index 00f74559ebbf6..0c6631b8d2307 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.store; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -60,9 +62,9 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.disruption.BlockClusterStateProcessing; import org.opensearch.test.transport.MockTransportService; import 
org.opensearch.transport.ConnectTransportException; @@ -85,7 +87,16 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase { +public class IndicesStoreIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public IndicesStoreIntegrationIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return remoteStoreSettings; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index e2cedea331412..9481a6116cdbc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -32,6 +32,8 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; @@ -57,6 +59,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -73,7 +76,16 @@ import static org.hamcrest.core.Is.is; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class IngestClientIT extends OpenSearchIntegTestCase { +public class IngestClientIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IngestClientIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 38f1375bc7504..4c949e11459ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -32,13 +32,17 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.NodeService; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -52,12 +56,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class IngestProcessorNotInstalledOnAllNodesIT extends OpenSearchIntegTestCase { +public 
class IngestProcessorNotInstalledOnAllNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - private final BytesReference pipelineSource; - private volatile boolean installPlugin; - - public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { + public IngestProcessorNotInstalledOnAllNodesIT(Settings settings) throws IOException { + super(settings); pipelineSource = BytesReference.bytes( jsonBuilder().startObject() .startArray("processors") @@ -70,6 +72,14 @@ public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { ); } + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + + private final BytesReference pipelineSource; + private volatile boolean installPlugin; + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return installPlugin ? Arrays.asList(IngestTestPlugin.class) : Collections.emptyList(); diff --git a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java index f77ae80a55276..2f0d4959d217b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java @@ -42,11 +42,10 @@ import org.opensearch.action.get.MultiGetResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -63,10 +62,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SimpleMgetIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleMgetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SimpleMgetIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleMgetIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java new file mode 100644 index 0000000000000..3cc10b0c0b858 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugins; + +import org.opensearch.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.containsString; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class PluginsServiceIT extends OpenSearchIntegTestCase { + + public void testNodeBootstrapWithCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithRangeCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithInCompatiblePlugin() throws IOException { + // Prepare the plugins directory with an incompatible plugin and attempt to start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + String incompatibleRange = "~" + + VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + incompatibleRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = 
PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + IllegalArgumentException e = assertThrows( + IllegalArgumentException.class, + () -> internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)) + ); + assertThat(e.getMessage(), containsString("Plugin [dummyPlugin] was built for OpenSearch version ")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java deleted file mode 100644 index 0af3d31f9e846..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.ratelimitting.admissioncontrol; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; -import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; -import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; -import org.opensearch.action.admin.indices.stats.ShardStats; -import org.opensearch.action.bulk.BulkRequest; -import org.opensearch.action.bulk.BulkResponse; -import org.opensearch.action.index.IndexRequest; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.UUIDs; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.node.resource.tracker.ResourceTrackerSettings; -import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; -import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; -import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; -import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Collections; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Stream; - -import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; -import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; -import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; 
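The PluginsServiceIT cases above exercise the new descriptor field dependencies = {opensearch:"~x.y.z"}: a tilde range pins major.minor and floats the patch version upward. A rough standalone illustration of that matching rule (an approximation for intuition, not OpenSearch's actual range parser, and it ignores qualifiers such as -SNAPSHOT):

    // "~2.12.0" accepts 2.12.x for any x >= 0; "~2.12.1" rejects 2.12.0.
    static boolean tildeMatches(String range, String version) {
        String[] floor = range.substring(1).split("\\.");   // strip "~", e.g. [2, 12, 1]
        String[] v = version.split("\\.");
        return floor[0].equals(v[0])                        // same major
            && floor[1].equals(v[1])                        // same minor
            && Integer.parseInt(v[2]) >= Integer.parseInt(floor[2]); // patch >= floor
    }

Under this rule the test's incompatibleRange, which uses the current patch plus one as its floor, correctly rejects Version.CURRENT.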
- -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 1) -public class AdmissionControlMultiNodeIT extends OpenSearchIntegTestCase { - - public static final Settings settings = Settings.builder() - .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) - .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) - .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) - .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) - .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) - .build(); - - private static final Logger LOGGER = LogManager.getLogger(AdmissionControlMultiNodeIT.class); - - public static final String INDEX_NAME = "test_index"; - - @Before - public void init() { - assertAcked( - prepareCreate( - INDEX_NAME, - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - ) - ); - ensureGreen(INDEX_NAME); - } - - @After - public void cleanup() { - client().admin().indices().prepareDelete(INDEX_NAME).get(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(settings).build(); - } - - public void testAdmissionControlRejectionOnEnforced() { - Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); - String primaryName = primaryReplicaNodeNames.v1(); - String replicaName = primaryReplicaNodeNames.v2(); - String coordinatingOnlyNode = getCoordinatingOnlyNode(); - AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); - AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); - final BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < 3; ++i) { - IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) - .source(Collections.singletonMap("key", randomAlphaOfLength(50))); - bulkRequest.add(request); - } - BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); - assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); - AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() - .getAdmissionControllerStatsList() - .get(0); - assertEquals(admissionControlPrimaryStats.rejectionCount.get(AdmissionControlActionType.INDEXING.getType()).longValue(), 1); - Arrays.stream(res.getItems()).forEach(bulkItemResponse -> { - assertTrue(bulkItemResponse.getFailureMessage().contains("OpenSearchRejectedExecutionException")); - }); - SearchResponse searchResponse; - try { - searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); - } catch (Exception exception) { - assertTrue(((SearchPhaseExecutionException) exception).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); - } - AdmissionControllerStats primaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); - assertEquals(primaryStats.rejectionCount.get(AdmissionControlActionType.SEARCH.getType()).longValue(), 1); - } - - public void testAdmissionControlEnforcedOnNonACEnabledActions() throws ExecutionException, InterruptedException { - String coordinatingOnlyNode = getCoordinatingOnlyNode(); - ClusterUpdateSettingsRequest updateSettingsRequest = new 
ClusterUpdateSettingsRequest(); - - updateSettingsRequest.transientSettings( - Settings.builder() - .put( - CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), - AdmissionControlMode.ENFORCED.getMode() - ) - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); - nodesStatsRequest.clear() - .indices(true) - .addMetrics( - NodesStatsRequest.Metric.JVM.metricName(), - NodesStatsRequest.Metric.OS.metricName(), - NodesStatsRequest.Metric.FS.metricName(), - NodesStatsRequest.Metric.PROCESS.metricName(), - NodesStatsRequest.Metric.ADMISSION_CONTROL.metricName() - ); - NodesStatsResponse nodesStatsResponse = client(coordinatingOnlyNode).admin().cluster().nodesStats(nodesStatsRequest).actionGet(); - ClusterHealthResponse clusterHealthResponse = client().admin().cluster().health(new ClusterHealthRequest()).actionGet(); - assertEquals(200, clusterHealthResponse.status().getStatus()); - assertFalse(nodesStatsResponse.hasFailures()); - } - - public void testAdmissionControlRejectionOnMonitor() { - Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); - String primaryName = primaryReplicaNodeNames.v1(); - String replicaName = primaryReplicaNodeNames.v2(); - String coordinatingOnlyNode = getCoordinatingOnlyNode(); - - AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); - AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - - updateSettingsRequest.transientSettings( - Settings.builder() - .put( - CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), - AdmissionControlMode.MONITOR.getMode() - ) - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - final BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < 3; ++i) { - IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) - .source(Collections.singletonMap("key", randomAlphaOfLength(50))); - bulkRequest.add(request); - } - BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); - assertFalse(res.hasFailures()); - AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() - .getAdmissionControllerStatsList() - .get(0); - AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats() - .getAdmissionControllerStatsList() - .get(0); - long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault( - AdmissionControlActionType.INDEXING.getType(), - new AtomicLong(0).longValue() - ); - long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault( - AdmissionControlActionType.INDEXING.getType(), - new AtomicLong(0).longValue() - ); - assertEquals(primaryRejectionCount, 1); - assertEquals(replicaRejectionCount, 0); - SearchResponse searchResponse; - searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); - admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); - admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0); - 
primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount() - .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); - replicaRejectionCount = admissionControlReplicaStats.getRejectionCount() - .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); - assertTrue(primaryRejectionCount == 1 || replicaRejectionCount == 1); - assertFalse(primaryRejectionCount == 1 && replicaRejectionCount == 1); - } - - public void testAdmissionControlRejectionOnDisabled() { - Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); - String primaryName = primaryReplicaNodeNames.v1(); - String replicaName = primaryReplicaNodeNames.v2(); - String coordinatingOnlyNode = getCoordinatingOnlyNode(); - - AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); - AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - - updateSettingsRequest.transientSettings( - Settings.builder() - .put( - CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), - AdmissionControlMode.DISABLED.getMode() - ) - ); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - final BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < 3; ++i) { - IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) - .source(Collections.singletonMap("key", randomAlphaOfLength(50))); - bulkRequest.add(request); - } - BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); - assertFalse(res.hasFailures()); - AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() - .getAdmissionControllerStatsList() - .get(0); - AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats() - .getAdmissionControllerStatsList() - .get(0); - long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault( - AdmissionControlActionType.INDEXING.getType(), - new AtomicLong(0).longValue() - ); - long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault( - AdmissionControlActionType.INDEXING.getType(), - new AtomicLong(0).longValue() - ); - assertEquals(primaryRejectionCount, 0); - assertEquals(replicaRejectionCount, 0); - SearchResponse searchResponse; - searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); - admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); - admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0); - primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount() - .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); - replicaRejectionCount = admissionControlReplicaStats.getRejectionCount() - .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); - assertTrue(primaryRejectionCount == 0 && replicaRejectionCount == 0); - } - - private Tuple<String, String> getPrimaryReplicaNodeNames(String indexName) { - IndicesStatsResponse response = client().admin().indices().prepareStats(indexName).get(); - String primaryId = 
Stream.of(response.getShards()) - .map(ShardStats::getShardRouting) - .filter(ShardRouting::primary) - .findAny() - .get() - .currentNodeId(); - String replicaId = Stream.of(response.getShards()) - .map(ShardStats::getShardRouting) - .filter(sr -> sr.primary() == false) - .findAny() - .get() - .currentNodeId(); - DiscoveryNodes nodes = client().admin().cluster().prepareState().get().getState().nodes(); - String primaryName = nodes.get(primaryId).getName(); - String replicaName = nodes.get(replicaId).getName(); - return new Tuple<>(primaryName, replicaName); - } - - private String getCoordinatingOnlyNode() { - return client().admin() - .cluster() - .prepareState() - .get() - .getState() - .nodes() - .getCoordinatingOnlyNodes() - .values() - .iterator() - .next() - .getName(); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index d28df90216beb..0752ab7c9d0f1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -44,15 +46,27 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class FullRollingRestartIT extends OpenSearchIntegTestCase { +public class FullRollingRestartIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FullRollingRestartIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { ClusterHealthResponse clusterHealth = requestBuilder.get(); if (clusterHealth.isTimedOut()) { @@ -121,7 +135,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> refreshing and checking data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -154,7 +168,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> stopped two nodes, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -188,7 +202,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> one node left, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { 
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java index 30d5af58df545..988aeac7da541 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -55,7 +57,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -74,7 +76,17 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; -public class RecoveryWhileUnderLoadIT extends OpenSearchIntegTestCase { +public class RecoveryWhileUnderLoadIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RecoveryWhileUnderLoadIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { @@ -150,7 +162,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -211,7 +223,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -325,7 +337,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception ); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -375,7 +387,7 @@ public void testRecoverWhileRelocating() throws Exception { ensureGreen(TimeValue.timeValueMinutes(5)); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numShards, 10, indexer.getIds()); } @@ -474,10 +486,11 @@ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iterat ); } - private void refreshAndAssert() throws Exception { + private void assertAfterRefreshAndWaitForReplication() throws Exception { assertBusy(() -> { RefreshResponse actionGet = client().admin().indices().prepareRefresh().get(); 
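            // Note: assertBusy retries this lambda for up to the 5-minute bound below,
            // so a refresh that transiently fails while shards are still allocating is
            // re-attempted instead of failing the test; only once every shard reports a
            // successful refresh does control fall through to waitForReplication().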
assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); + waitForReplication(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index 8c69424939b57..8d8aea7fc1f3b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.tests.util.English; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -73,9 +75,9 @@ import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockIndexEventListener; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.Transport; @@ -114,7 +116,17 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class RelocationIT extends OpenSearchIntegTestCase { +public class RelocationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RelocationIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @Override @@ -158,7 +170,7 @@ public void testSimpleRelocationNoIndexing() { } logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); @@ -186,7 +198,7 @@ public void testSimpleRelocationNoIndexing() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count again..."); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -265,7 +277,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refreshAndWaitForReplication("test"); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { @@ -650,7 +662,7 @@ public void testRelocateWhileWaitingForRefresh() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -726,7 +738,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E 
logger.info("--> verifying count"); assertBusy(() -> { - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java index 85f90738b19ce..1f5fbae6e58e9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java @@ -32,21 +32,34 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.get.GetResponse; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.client.Requests.flushRequest; import static org.opensearch.client.Requests.getRequest; import static org.opensearch.client.Requests.indexRequest; -import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -public class SimpleRecoveryIT extends OpenSearchIntegTestCase { +public class SimpleRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + @Override public Settings indexSettings() { return Settings.builder().put(super.indexSettings()).put(recoverySettings()).build(); @@ -72,7 +85,7 @@ public void testSimpleRecovery() throws Exception { assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); client().index(indexRequest("test").id("2").source(source("2", "test"), MediaTypeRegistry.JSON)).actionGet(); - RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet(); + RefreshResponse refreshResponse = refreshAndWaitForReplication("test"); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index 5f0922615a557..bf0533143cf91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.admin.cluster.node.stats.NodeStats; @@ -47,6 +49,7 @@ import 
org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -65,7 +68,16 @@ @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) @SuppressCodecs("*") // test relies on exact file extensions -public class TruncatedRecoveryIT extends OpenSearchIntegTestCase { +public class TruncatedRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public TruncatedRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java new file mode 100644 index 0000000000000..5240949ff87b9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class DocRepMigrationTestCase extends MigrationBaseTestCase { + + public void testMixedModeAddDocRep() throws Exception { + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + addRemote = false; + internalCluster().startNode(); + String[] allNodes = internalCluster().getNodeNames(); + assertBusy(() -> { assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), allNodes.length); }); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java new file mode 100644 index 0000000000000..88d6f6897ee68 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this 
file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; + +public class MigrationBaseTestCase extends OpenSearchIntegTestCase { + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + + protected Path segmentRepoPath; + protected Path translogRepoPath; + + boolean addRemote = false; + + protected Settings nodeSettings(int nodeOrdinal) { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (addRemote) { + logger.info("Adding remote store node"); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put("discovery.initial_state_timeout", "500ms") + .build(); + } else { + logger.info("Adding docrep node"); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("discovery.initial_state_timeout", "500ms").build(); + } + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java new file mode 100644 index 0000000000000..a31d203058565 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.remotemigration;
+
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.util.List;
+
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
+import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
+public class RemoteStoreMigrationTestCase extends MigrationBaseTestCase {
+    public void testMixedModeAddRemoteNodes() throws Exception {
+        internalCluster().setBootstrapClusterManagerNodeIndex(0);
+        List<String> cmNodes = internalCluster().startNodes(1);
+        Client client = internalCluster().client(cmNodes.get(0));
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed"));
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        // add remote nodes in mixed mode cluster
+        addRemote = true;
+        internalCluster().startNode();
+        internalCluster().startNode();
+        internalCluster().validateClusterFormed();
+
+        // assert the repository gets registered
+        GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME });
+        GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet();
+        assertEquals(1, getRepositoriesResponse.repositories().size());
+
+        // add another remote node to the mixed mode cluster
+        addRemote = true;
+        internalCluster().startNode();
+        assertBusy(() -> {
+            assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), internalCluster().getNodeNames().length);
+        });
+
+        // add an incompatible remote node to the mixed cluster
+        Settings.Builder badSettings = Settings.builder()
+            .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, "REPOSITORY_2_NAME", translogRepoPath))
+            .put("discovery.initial_state_timeout", "500ms");
+        String badNode = internalCluster().startNode(badSettings);
+        assertTrue(client.admin().cluster().prepareClusterStats().get().getNodes().size() < internalCluster().getNodeNames().length);
+        internalCluster().stopRandomNode(settings -> settings.get("node.name").equals(badNode));
+    }
+
+    public void testMigrationDirections() {
+        ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+        // set the migration direction to docrep
+        updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "docrep"));
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store"));
+        assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+        
updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "random")); + assertThrows(IllegalArgumentException.class, () -> client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index 67316ed0e6e6b..869032a84c2c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -44,6 +44,7 @@ public Settings indexSettings() { .build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index c957f1b338bfe..6de61cf203c60 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -157,10 +157,4 @@ public void testDisconnectsDuringRecovery() { public void testReplicaRecovery() { } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9580") - public void testRerouteRecovery() { - - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index d23e634bb3368..3899c8a80f442 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -9,6 +9,8 @@ package org.opensearch.remotestore; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.bulk.BulkItemResponse; @@ -26,7 +28,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; @@ -37,7 +38,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -55,11 +56,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static 
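Taken together, the migration tests above drive two cluster settings: the compatibility mode gates whether docrep and remote-store nodes may coexist, and the migration direction declares which way data should move. A hedged sketch of flipping both from a client (the setting constants come from this diff; their literal keys are not spelled out here):

    ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
    request.persistentSettings(
        Settings.builder()
            .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")   // allow mixed node types
            .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")        // migrate docrep to remote store
    );
    assertAcked(client().admin().cluster().updateSettings(request).actionGet());
    // Unrecognized values are rejected at validation time; testMigrationDirections
    // asserts this with assertThrows(IllegalArgumentException.class, ...) for "random".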
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
-import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
-import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 
 public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
@@ -84,6 +83,10 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
     );
 
     protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, String index) {
+        return indexData(numberOfIterations, invokeFlush, false, index);
+    }
+
+    protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, String index) {
         long totalOperations = 0;
         long refreshedOrFlushedOperations = 0;
         long maxSeqNo = -1;
@@ -96,6 +99,11 @@ protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlus
             } else {
                 refresh(index);
             }
+
+            // skip indexing on the last iteration, as we don't want any data left in the remote translog
+            if (emptyTranslog && i == numberOfIterations - 1) {
+                continue;
+            }
             maxSeqNoRefreshedOrFlushed = maxSeqNo;
             indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED + "-shard-" + shardId, maxSeqNoRefreshedOrFlushed);
             refreshedOrFlushedOperations = totalOperations;
@@ -137,6 +145,18 @@ protected Settings nodeSettings(int nodeOrdinal) {
         }
     }
 
+    protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException {
+        GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName });
+        GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get();
+        RepositoryMetadata rmd = res.repositories().get(0);
+        Settings.Builder settings = Settings.builder()
+            .put("location", rmd.settings().get("location"))
+            .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value);
+        assertAcked(
+            client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get()
+        );
+    }
+
     public Settings indexSettings() {
         return defaultIndexSettings();
     }
@@ -167,121 +187,6 @@ protected BulkResponse indexBulk(String indexName, int numDocs) {
         return client().bulk(bulkRequest).actionGet();
     }
 
-    public static Settings remoteStoreClusterSettings(String name, Path path) {
-        return remoteStoreClusterSettings(name, path, name, path);
-    }
-
-    public static Settings remoteStoreClusterSettings(
-        String segmentRepoName,
-        Path segmentRepoPath,
-        String segmentRepoType,
-        String translogRepoName,
-        Path translogRepoPath,
-        String translogRepoType
-    ) {
-        Settings.Builder settingsBuilder = Settings.builder();
-        settingsBuilder.put(
-            buildRemoteStoreNodeAttributes(
-                segmentRepoName,
-                segmentRepoPath,
-                segmentRepoType,
-                translogRepoName,
-                translogRepoPath,
-                translogRepoType,
-                false
-            )
-        );
-        return settingsBuilder.build();
-    }
-
-    public static Settings remoteStoreClusterSettings(
-        String segmentRepoName,
-        Path segmentRepoPath,
-        String translogRepoName,
-        Path translogRepoPath
-    ) {
-        
Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); - return settingsBuilder.build(); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String translogRepoName, - Path translogRepoPath, - boolean withRateLimiterAttributes - ) { - return buildRemoteStoreNodeAttributes( - segmentRepoName, - segmentRepoPath, - FsRepository.TYPE, - translogRepoName, - translogRepoPath, - FsRepository.TYPE, - withRateLimiterAttributes - ); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String segmentRepoType, - String translogRepoName, - Path translogRepoPath, - String translogRepoType, - boolean withRateLimiterAttributes - ) { - String segmentRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String segmentRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - String translogRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - translogRepoName - ); - String translogRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - translogRepoName - ); - String stateRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String stateRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - - Settings.Builder settings = Settings.builder() - .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(segmentRepoTypeAttributeKey, segmentRepoType) - .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) - .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) - .put(translogRepoTypeAttributeKey, translogRepoType) - .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) - .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(stateRepoTypeAttributeKey, segmentRepoType) - .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); - - if (withRateLimiterAttributes) { - settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) - .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); - } - - return settings.build(); - } - private Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 977bee25bd621..8c8209b80bfd8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -387,10 +387,16 @@ private void validateCurrentMetadata() throws Exception { internalCluster().getClusterManagerName() ); assertBusy(() -> { - ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( - getClusterState().getClusterName().value(), - getClusterState().metadata().clusterUUID() - ).get(); + ClusterMetadataManifest manifest; + try { + manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + } catch (IllegalStateException e) { + // AssertionError helps us use assertBusy and retry validation if failed due to a race condition. + throw new AssertionError("Error while validating latest cluster metadata", e); + } ClusterState clusterState = getClusterState(); Metadata currentMetadata = clusterState.metadata(); assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 7626e3dba6424..94acf2b1dbb27 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -17,8 +17,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.indices.IndicesService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -44,7 +50,7 @@ public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { * @throws IOException IO Exception. */ public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { - testRestoreFlow(1, true, randomIntBetween(1, 5)); + testRestoreFlow(1, true, true, randomIntBetween(1, 5)); } /** @@ -52,7 +58,7 @@ public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { * @throws IOException IO Exception. 
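The RemoteStoreClusterStateRestoreIT change above relies on a property of assertBusy: it keeps retrying only while the body throws AssertionError, so a racy IllegalStateException must be re-wrapped to get retry semantics instead of an immediate failure. The general shape, with a hypothetical validation step standing in for the manifest read:

    assertBusy(() -> {
        try {
            validateThatMayRaceWithPublication(); // hypothetical racy check
        } catch (IllegalStateException e) {
            // Re-wrap so assertBusy treats the race as retryable.
            throw new AssertionError("hit a transient state, retrying", e);
        }
    });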
      */
     public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception {
-        testRestoreFlow(1, false, randomIntBetween(1, 5));
+        testRestoreFlow(1, false, true, randomIntBetween(1, 5));
     }
 
     /**
@@ -61,7 +67,7 @@ public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception {
      * @throws IOException IO Exception.
      */
     public void testRemoteTranslogRestoreWithRefreshedData() throws Exception {
-        testRestoreFlow(randomIntBetween(2, 5), false, randomIntBetween(1, 5));
+        testRestoreFlow(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5));
     }
 
     /**
@@ -70,7 +76,7 @@ public void testRemoteTranslogRestoreWithRefreshedData() throws Exception {
      * @throws IOException IO Exception.
      */
     public void testRemoteTranslogRestoreWithCommittedData() throws Exception {
-        testRestoreFlow(randomIntBetween(2, 5), true, randomIntBetween(1, 5));
+        testRestoreFlow(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5));
     }
 
     /**
@@ -78,7 +84,7 @@ public void testRemoteTranslogRestoreWithCommittedData() throws Exception {
      * @throws IOException IO Exception.
      */
     public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Exception {
-        testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5));
+        testRestoreFlowBothPrimaryReplicasDown(1, true, true, randomIntBetween(1, 5));
     }
 
     /**
@@ -86,7 +92,7 @@ public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Except
      * @throws IOException IO Exception.
      */
     public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Exception {
-        testRestoreFlowBothPrimaryReplicasDown(1, false, randomIntBetween(1, 5));
+        testRestoreFlowBothPrimaryReplicasDown(1, false, true, randomIntBetween(1, 5));
     }
 
     /**
@@ -95,7 +101,7 @@ public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Excep
      * @throws IOException IO Exception.
      */
     public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception {
-        testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, randomIntBetween(1, 5));
+        testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5));
     }
 
     /**
@@ -104,7 +110,7 @@ public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception
      * @throws IOException IO Exception.
      */
     public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws Exception {
-        testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, randomIntBetween(1, 5));
+        testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5));
     }
 
     private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long> indexStats) throws Exception {
@@ -122,9 +128,9 @@ private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long
      * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked.
      * @throws IOException IO Exception.
      */
-    private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception {
+    private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount) throws Exception {
         prepareCluster(1, 3, INDEX_NAME, 0, shardCount);
-        Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME);
+        Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME);
         assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
 
         assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(REFRESHED_OR_FLUSHED_OPERATIONS));
@@ -135,15 +141,64 @@ private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int sh
         restoreAndVerify(shardCount, 0, indexStats);
     }
 
+    public void testMultipleWriters() throws Exception {
+        prepareCluster(1, 2, INDEX_NAME, 1, 1);
+        Map<String, Long> indexStats = indexData(randomIntBetween(2, 5), true, true, INDEX_NAME);
+        assertEquals(2, getNumShards(INDEX_NAME).totalNumShards);
+
+        // ensure replica has latest checkpoint
+        flushAndRefresh(INDEX_NAME);
+        flushAndRefresh(INDEX_NAME);
+
+        Index indexObj = clusterService().state().metadata().indices().get(INDEX_NAME).getIndex();
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName(INDEX_NAME));
+        IndexService indexService = indicesService.indexService(indexObj);
+        IndexShard indexShard = indexService.getShard(0);
+        RemoteSegmentMetadata remoteSegmentMetadataBeforeFailover = indexShard.getRemoteDirectory().readLatestMetadataFile();
+
+        // ensure all segments synced to replica
+        assertBusy(
+            () -> assertHitCount(
+                client(primaryNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(),
+                indexStats.get(TOTAL_OPERATIONS)
+            ),
+            30,
+            TimeUnit.SECONDS
+        );
+        assertBusy(
+            () -> assertHitCount(
+                client(replicaNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(),
+                indexStats.get(TOTAL_OPERATIONS)
+            ),
+            30,
+            TimeUnit.SECONDS
+        );
+
+        String newPrimaryNodeName = replicaNodeName(INDEX_NAME);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
+        ensureYellow(INDEX_NAME);
+
+        indicesService = internalCluster().getInstance(IndicesService.class, newPrimaryNodeName);
+        indexService = indicesService.indexService(indexObj);
+        indexShard = indexService.getShard(0);
+        IndexShard finalIndexShard = indexShard;
+        assertBusy(() -> assertTrue(finalIndexShard.isStartedPrimary() && finalIndexShard.isPrimaryMode()));
+        assertEquals(
+            finalIndexShard.getLatestSegmentInfosAndCheckpoint().v2().getPrimaryTerm(),
+            remoteSegmentMetadataBeforeFailover.getPrimaryTerm() + 1
+        );
+    }
+
     /**
      * Helper function to test restoring an index having replicas from remote store when all the nodes housing the primary/replica drop.
      * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data.
      * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked.
      * @throws IOException IO Exception.
      */
-    private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception {
+    private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount)
+        throws Exception {
         prepareCluster(1, 2, INDEX_NAME, 1, shardCount);
-        Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME);
+        Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME);
         assertEquals(shardCount * 2, getNumShards(INDEX_NAME).totalNumShards);
 
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(INDEX_NAME)));
@@ -391,7 +446,7 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws Excep
      * @throws IOException IO Exception.
      */
     public void testRTSRestoreDataOnlyInTranslog() throws Exception {
-        testRestoreFlow(0, true, randomIntBetween(1, 5));
+        testRestoreFlow(0, true, false, randomIntBetween(1, 5));
     }
 
     public void testRateLimitedRemoteDownloads() throws Exception {
@@ -425,7 +480,14 @@ public void testRateLimitedRemoteDownloads() throws Exception {
         settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue()));
         settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB);
 
-        assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get());
+        assertAcked(
+            client().admin()
+                .cluster()
+                .preparePutRepository(REPOSITORY_NAME)
+                .setType(ReloadableFsRepository.TYPE)
+                .setSettings(settings)
+                .get()
+        );
 
         for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
             Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME);
@@ -454,7 +516,14 @@ public void testRateLimitedRemoteDownloads() throws Exception {
         // revert repo metadata to pass asserts on repo metadata vs. node attrs during teardown
         // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700
         settings.remove("max_remote_download_bytes_per_sec");
-        assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get());
+        assertAcked(
+            client().admin()
+                .cluster()
+                .preparePutRepository(REPOSITORY_NAME)
+                .setType(ReloadableFsRepository.TYPE)
+                .setSettings(settings)
+                .get()
+        );
         for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
             Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME);
             assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec"));
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java
index 8372135fc55c4..3d8d001b17ddf 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java
@@ -23,8 +23,6 @@
 import org.opensearch.indices.replication.SegmentReplicationState;
 import org.opensearch.indices.replication.SegmentReplicationTarget;
 import org.opensearch.indices.replication.SegmentReplicationTargetService;
-import org.opensearch.indices.replication.common.ReplicationCollection;
-import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.disruption.SlowClusterStateProcessing;
 
@@ -33,6 +31,8 @@
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
 /**
  * This class runs tests with remote store + segRep while blocking file downloads
 */
@@ -59,22 +59,18 @@ public void testCancelReplicationWhileSyncingSegments() throws Exception {
         indexSingleDoc();
         refresh(INDEX_NAME);
         waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10));
-        final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId());
-        assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage());
-        ReplicationCollection.ReplicationRef<SegmentReplicationTarget> segmentReplicationTargetReplicationRef = targetService.get(
-            state.getReplicationId()
-        );
-        final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get();
-        // close the target ref here otherwise it will hold a refcount
-        segmentReplicationTargetReplicationRef.close();
+        SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId());
         assertNotNull(segmentReplicationTarget);
+        assertEquals(SegmentReplicationState.Stage.GET_FILES, segmentReplicationTarget.state().getStage());
         assertTrue(segmentReplicationTarget.refCount() > 0);
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
-        assertBusy(() -> {
-            assertTrue(indexShard.routingEntry().primary());
-            assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()));
-            assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount());
-        });
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings(INDEX_NAME)
+                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
+        );
+        assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()));
+        assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount());
         unblockNode(REPOSITORY_NAME, replicaNode);
         cleanupRepo();
     }
 
@@ -85,7 +81,6 @@ public void testCancelReplicationWhileFetchingMetadata() throws Exception {
         final Set<String> dataNodeNames = internalCluster().getDataNodeNames();
         final String replicaNode = getNode(dataNodeNames, false);
-        final String primaryNode = getNode(dataNodeNames, true);
 
         SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode);
         ensureGreen(INDEX_NAME);
@@ -94,22 +89,18 @@
         indexSingleDoc();
         refresh(INDEX_NAME);
         waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10));
-        final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId());
-        assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage());
-        ReplicationCollection.ReplicationRef<SegmentReplicationTarget> segmentReplicationTargetReplicationRef = targetService.get(
-            state.getReplicationId()
-        );
-        final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get();
-        // close the target ref here otherwise it will hold a refcount
-        segmentReplicationTargetReplicationRef.close();
+        SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId());
         assertNotNull(segmentReplicationTarget);
+        assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, segmentReplicationTarget.state().getStage());
         assertTrue(segmentReplicationTarget.refCount() > 0);
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
-        assertBusy(() -> {
-            assertTrue(indexShard.routingEntry().primary());
-            assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()));
-            assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount());
-        });
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings(INDEX_NAME)
+                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
+        );
+        assertNull(targetService.get(indexShard.shardId()));
+        assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount());
         unblockNode(REPOSITORY_NAME, replicaNode);
         cleanupRepo();
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java
index 2fbaf4ea5a4d3..42c257eb79eff 100644
--- a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java
@@ -12,7 +12,6 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.rest.RestStatus;
 import org.opensearch.index.MockEngineFactoryPlugin;
@@ -21,7 +20,7 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.search.MockSearchService;
 import org.opensearch.test.MockHttpTransport;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.opensearch.test.TestGeoShapeFieldMapperPlugin;
 import org.opensearch.test.store.MockFSIndexStore;
 import org.opensearch.test.transport.MockTransportService;
@@ -38,7 +37,7 @@
 import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.apache.logging.log4j.core.util.Throwables.getRootCause;
 
-public class ScriptCacheIT extends ParameterizedOpenSearchIntegTestCase {
+public class ScriptCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
     public ScriptCacheIT(Settings settings) {
         super(settings);
     }
@@ -51,11 +50,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     protected Settings nodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java
index 18b4625761c51..5a19e2b841c08 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java
@@ -50,7 +50,6 @@
 import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.tasks.TaskCancelledException;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
@@ -62,7 +61,7 @@
 import org.opensearch.search.lookup.LeafFieldsLookup;
 import org.opensearch.tasks.TaskInfo;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.opensearch.transport.TransportException;
 import org.junit.After;
 
@@ -91,7 +90,7 @@
 import static org.hamcrest.Matchers.notNullValue;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
-public class SearchCancellationIT extends ParameterizedOpenSearchIntegTestCase {
+public class SearchCancellationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     private TimeValue requestCancellationTimeout = TimeValue.timeValueSeconds(1);
     private TimeValue clusterCancellationTimeout = TimeValue.timeValueMillis(1500);
@@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(ScriptedBlockPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
index 52cc797ddd8da..ef7da395d2151 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
@@ -38,13 +38,12 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptPlugin;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -59,7 +58,7 @@
 import static org.opensearch.search.SearchTimeoutIT.ScriptedTimeoutPlugin.SCRIPT_NAME;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
-public class SearchTimeoutIT extends ParameterizedOpenSearchIntegTestCase {
+public class SearchTimeoutIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     public SearchTimeoutIT(Settings settings) {
         super(settings);
     }
@@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(ScriptedTimeoutPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
index d1e66c19c28e2..f08d3e871c579 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java
@@ -480,6 +480,7 @@ public void testShardRoutingWithNetworkDisruption_FailOpenDisabled() throws Exce
      * Assertions are put to make sure such shard search requests are served by data node in zone c.
      * @throws IOException throws exception
      */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10673")
     public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Exception {
 
         Settings commonSettings = Settings.builder()
@@ -978,6 +979,7 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws
      * MultiGet with fail open enabled. No request failure on network disruption
      * @throws IOException throws exception
     */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10755")
     public void testMultiGetWithNetworkDisruption_FailOpenEnabled() throws Exception {
 
         Settings commonSettings = Settings.builder()
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java
index 24c72a66da6d0..b7f71b00d802f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java
@@ -38,9 +38,8 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.search.SearchType;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -52,10 +51,10 @@
 import static org.hamcrest.Matchers.equalTo;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
-public class SearchWithRejectionsIT extends ParameterizedOpenSearchIntegTestCase {
+public class SearchWithRejectionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public SearchWithRejectionsIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public SearchWithRejectionsIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java
index a61102b9db144..7ed3526cabe3f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java
@@ -38,9 +38,8 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -53,7 +52,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
 
 @ClusterScope(scope = SUITE)
-public class StressSearchServiceReaperIT extends ParameterizedOpenSearchIntegTestCase {
+public class StressSearchServiceReaperIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
     public StressSearchServiceReaperIT(Settings settings) {
         super(settings);
     }
@@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         // very frequent checks
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
index 257786c1e9ce5..6059abce53c8b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
@@ -40,7 +40,6 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.terms.IncludeExclude;
 import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder;
@@ -49,7 +48,7 @@
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -62,7 +61,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class AggregationsIntegrationIT extends ParameterizedOpenSearchIntegTestCase {
+public class AggregationsIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     static int numDocs;
 
@@ -71,8 +70,8 @@ public class AggregationsIntegrationIT extends ParameterizedOpenSearchIntegTestC
         + LARGE_STRING.length()
         + "] used in the request has exceeded the allowed maximum";
 
-    public AggregationsIntegrationIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public AggregationsIntegrationIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
    @ParametersFactory
@@ -83,11 +82,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("index").setMapping("f", "type=keyword").get());
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
index 3d3cf1943dfe3..1826dd69cd804 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
@@ -37,12 +37,11 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.bucket.missing.Missing;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
 import java.util.Arrays;
@@ -61,10 +60,10 @@
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.core.IsNull.notNullValue;
 
-public class CombiIT extends ParameterizedOpenSearchIntegTestCase {
+public class CombiIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public CombiIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public CombiIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     /**
      * Making sure that if there are multiple aggregations, working on the same field, yet require different
     * value source type, they can all still work. It used to fail as we used to cache the ValueSource by the
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
index 2ffdf5fb32778..302ec3116d187 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
@@ -39,7 +39,6 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.index.query.RangeQueryBuilder;
@@ -56,7 +55,7 @@
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
 import org.opensearch.search.aggregations.metrics.Sum;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.junit.After;
 import org.junit.Before;
 
@@ -94,10 +93,10 @@
  * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like
 * the growth of dynamic arrays is tested.
 */
-public class EquivalenceIT extends ParameterizedOpenSearchIntegTestCase {
+public class EquivalenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public EquivalenceIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public EquivalenceIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -108,11 +107,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
index 1bc0cb36f5fe3..b650855083eed 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
@@ -37,11 +37,10 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -57,10 +56,10 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
 
-public class MetadataIT extends ParameterizedOpenSearchIntegTestCase {
+public class MetadataIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public MetadataIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public MetadataIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     public void testMetadataSetOnAggregationResult() throws Exception {
         assertAcked(client().admin().indices().prepareCreate("idx").setMapping("name", "type=keyword").get());
         IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
index e6325987d330f..bdd16c7e74dc0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
@@ -37,7 +37,6 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
@@ -47,7 +46,7 @@
 import org.opensearch.search.aggregations.metrics.Percentiles;
 import org.opensearch.search.aggregations.metrics.Stats;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -65,10 +64,10 @@
 import static org.hamcrest.Matchers.closeTo;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class MissingValueIT extends ParameterizedOpenSearchIntegTestCase {
+public class MissingValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public MissingValueIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public MissingValueIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -79,11 +78,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     protected int maximumNumberOfShards() {
         return 2;
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
index cd0922606ec99..557ec9a37978d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
@@ -39,7 +39,6 @@
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.query.BoolQueryBuilder;
@@ -50,7 +49,7 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
 import java.util.ArrayList;
@@ -75,7 +74,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class AdjacencyMatrixIT extends ParameterizedOpenSearchIntegTestCase {
+public class AdjacencyMatrixIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase {
 
     static int numDocs, numSingleTag1Docs, numSingleTag2Docs, numTag1Docs, numTag2Docs, numMultiTagDocs;
     static final int MAX_NUM_FILTERS = 3;
@@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
index 7ab1a44ce220c..9a1efb3336212 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
@@ -36,11 +36,10 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -53,7 +52,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class BooleanTermsIT extends ParameterizedOpenSearchIntegTestCase {
+public class BooleanTermsIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase {
 
     private static final String SINGLE_VALUED_FIELD_NAME = "b_value";
     private static final String MULTI_VALUED_FIELD_NAME = "b_values";
@@ -72,11 +71,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java
new file mode 100644
index 0000000000000..5a38ba670f1dc
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.aggregations.bucket;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
+import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
+import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
+import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
+
+@OpenSearchIntegTestCase.SuiteScopeTestCase
+public class CompositeAggIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+
+    public CompositeAggIT(Settings staticSettings) {
+        super(staticSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        assertAcked(
+            prepareCreate(
+                "idx",
+                Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer")
+        );
+        waitForRelocation(ClusterHealthStatus.GREEN);
+
+        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get();
+        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get();
+        refresh("idx");
+        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get();
+        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get();
+        refresh("idx");
+        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get();
+        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get();
+        refresh("idx");
+        client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get();
+        client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get();
+        refresh("idx");
+
+        waitForRelocation(ClusterHealthStatus.GREEN);
+        refresh();
+    }
+
+    public void testCompositeAggWithNoSubAgg() {
+        SearchResponse rsp = client().prepareSearch("idx")
+            .addAggregation(new CompositeAggregationBuilder("my_composite", getTestValueSources()))
+            .get();
+        assertSearchResponse(rsp);
+    }
+
+    public void testCompositeAggWithSubAgg() {
+        SearchResponse rsp = client().prepareSearch("idx")
+            .addAggregation(
+                new CompositeAggregationBuilder("my_composite", getTestValueSources()).subAggregation(
+                    new MaxAggregationBuilder("max").field("score")
+                )
+            )
+            .get();
+        assertSearchResponse(rsp);
+    }
+
+    private List<CompositeValuesSourceBuilder<?>> getTestValueSources() {
+        final List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>();
+        sources.add(new TermsValuesSourceBuilder("keyword_vs").field("type"));
+        sources.add(new TermsValuesSourceBuilder("num_vs").field("num"));
+        return sources;
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
index ee94e574228df..6a15490cbfe63 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
@@ -41,7 +41,6 @@
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.time.DateFormatters;
 import org.opensearch.common.time.DateMathParser;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.query.MatchNoneQueryBuilder;
 import org.opensearch.index.query.QueryBuilders;
@@ -59,7 +58,7 @@
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 import org.junit.After;
 
@@ -98,7 +97,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DateHistogramIT extends ParameterizedOpenSearchIntegTestCase {
+public class DateHistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     static Map<ZonedDateTime, Map<String, Object>> expectedMultiSortBuckets;
 
@@ -106,8 +105,8 @@ private ZonedDateTime date(int month, int day) {
         return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC);
     }
 
-    public DateHistogramIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public DateHistogramIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     private ZonedDateTime date(String date) {
         return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
     }
@@ -183,9 +177,9 @@ public void setupSuiteScopeCluster() throws Exception {
             indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16
             indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3
             indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16
-            indexDoc(3, 23, 6)
+            indexDoc(3, 23, 6) // date: Mar 23, dates: Mar 23, Apr 24
             )
-        ); // date: Mar 23, dates: Mar 23, Apr 24
+        );
         indexRandom(true, builders);
         ensureSearchable();
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index d44071e1ef9c5..eea896e01afe1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -38,12 +38,11 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.time.DateFormatters;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.junit.After;
 import org.junit.Before;
 
@@ -69,13 +68,13 @@
 */
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
-public class DateHistogramOffsetIT extends ParameterizedOpenSearchIntegTestCase {
+public class DateHistogramOffsetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss";
     private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT);
 
-    public DateHistogramOffsetIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public DateHistogramOffsetIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     private ZonedDateTime date(String date) {
         return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
index ae4243019ffb1..f00b601a54b80 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
@@ -38,7 +38,6 @@
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
@@ -49,7 +48,7 @@
 import org.opensearch.search.aggregations.bucket.range.Range.Bucket;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
 import java.time.ZoneId;
@@ -81,10 +80,10 @@
 import static org.hamcrest.core.IsNull.nullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DateRangeIT extends ParameterizedOpenSearchIntegTestCase {
+public class DateRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public DateRangeIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public DateRangeIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
         return client().prepareIndex("idx")
             .setSource(
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 1d5f7f93e7410..b62e5f0f7f3b0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -37,7 +37,6 @@
 import org.opensearch.action.search.SearchType;
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.TermQueryBuilder;
 import org.opensearch.search.aggregations.BucketOrder;
 import org.opensearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder;
@@ -48,7 +47,7 @@
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.Max;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -71,12 +70,12 @@
  * Tests the Sampler aggregation
 */
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DiversifiedSamplerIT extends ParameterizedOpenSearchIntegTestCase {
+public class DiversifiedSamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     public static final int NUM_SHARDS = 2;
 
-    public DiversifiedSamplerIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public DiversifiedSamplerIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -87,11 +86,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     public String randomExecutionHint() {
         return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString();
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
index 88bb41923e53f..ccb4af8386472 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
@@ -88,8 +88,8 @@
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 public class DoubleTermsIT extends AbstractTermsTestCase {
 
-    public DoubleTermsIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public DoubleTermsIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @Override
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
index 7aa98803403e0..2863711d49580 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
@@ -37,7 +37,6 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.BoolQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
@@ -46,7 +45,7 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
 import java.util.ArrayList;
@@ -68,12 +67,12 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class FilterIT extends ParameterizedOpenSearchIntegTestCase {
+public class FilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     static int numDocs, numTag1Docs;
 
-    public FilterIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public FilterIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java
new file mode 100644
index 0000000000000..e051265d4b3bc
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.aggregations.bucket;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.action.index.IndexRequestBuilder;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.time.DateFormatter;
+import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.opensearch.search.aggregations.bucket.histogram.Histogram;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;
+
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+@OpenSearchIntegTestCase.SuiteScopeTestCase
+public class FilterRewriteIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase {
+
+    // simulate segment level match all
+    private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true);
+    private static final Map<String, Long> expected = new HashMap<>();
+
+    public FilterRewriteIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected void setupSuiteScopeCluster() throws Exception {
+        assertAcked(client().admin().indices().prepareCreate("idx").get());
+        expected.clear();
+
+        final int repeat = randomIntBetween(2, 10);
+        final Set<Long> longTerms = new HashSet<>();
+
+        for (int i = 0; i < repeat; i++) {
+            final List<IndexRequestBuilder> indexRequests = new ArrayList<>();
+
+            long longTerm;
+            do {
+                longTerm = randomInt(repeat * 2);
+            } while (!longTerms.add(longTerm));
+            ZonedDateTime time = ZonedDateTime.of(2024, 1, ((int) longTerm) + 1, 0, 0, 0, 0, ZoneOffset.UTC);
+            String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time);
+
+            final int frequency = randomBoolean() ? 1 : randomIntBetween(2, 20);
+            for (int j = 0; j < frequency; j++) {
+                indexRequests.add(
+                    client().prepareIndex("idx")
+                        .setSource(jsonBuilder().startObject().field("date", dateTerm).field("match", true).endObject())
+                );
+            }
+            expected.put(dateTerm + "T00:00:00.000Z", (long) frequency);
+
+            indexRandom(true, false, indexRequests);
+        }
+
+        ensureSearchable();
+    }
+
+    public void testMinDocCountOnDateHistogram() throws Exception {
+        final SearchResponse allResponse = client().prepareSearch("idx")
+            .setSize(0)
+            .setQuery(QUERY)
+            .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0))
+            .get();
+
+        final Histogram allHisto = allResponse.getAggregations().get("histo");
+        Map<String, Long> results = new HashMap<>();
+        allHisto.getBuckets().forEach(bucket -> results.put(bucket.getKeyAsString(), bucket.getDocCount()));
+
+        for (Map.Entry<String, Long> entry : expected.entrySet()) {
+            assertEquals(entry.getValue(), results.get(entry.getKey()));
+        }
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
index b6cf515df78ba..e64877a1d4030 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
@@ -38,7 +38,6 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.BoolQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
@@ -48,7 +47,7 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 
 import java.util.ArrayList;
@@ -72,7 +71,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class FiltersIT extends ParameterizedOpenSearchIntegTestCase {
+public class FiltersIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase {
 
     static int numDocs, numTag1Docs, numTag2Docs, numOtherDocs;
 
@@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
index 025bebf8b254d..ed0bd3aad5bab 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
@@ -41,7 +41,6 @@
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.DistanceUnit;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.InternalAggregation;
@@ -50,7 +49,7 @@
 import org.opensearch.search.aggregations.bucket.range.Range.Bucket;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.opensearch.test.VersionUtils;
 import org.hamcrest.Matchers;
 
@@ -76,10 +75,10 @@
 import static org.hamcrest.core.IsNull.nullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase {
+public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
-    public GeoDistanceIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public GeoDistanceIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @Override
@@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception {
         XContentBuilder source = jsonBuilder().startObject().field("city", name);
         source.startArray("location");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
index be31a3afadad0..a4aea6096a6e4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
@@ -37,13 +37,12 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.bucket.global.Global;
 import org.opensearch.search.aggregations.metrics.Stats;
 import org.opensearch.test.OpenSearchIntegTestCase;
-import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -61,12 +60,12 @@
 import static org.hamcrest.core.IsNull.notNullValue;
 
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class GlobalIT extends ParameterizedOpenSearchIntegTestCase {
+public class GlobalIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
 
     static int numDocs;
 
-    public GlobalIT(Settings dynamicSettings) {
-        super(dynamicSettings);
+    public GlobalIT(Settings staticSettings) {
+        super(staticSettings);
     }
 
     @ParametersFactory
@@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() {
         );
     }
 
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java index 75f57d1cc4c0e..4abd068d6fe37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -56,7 +55,7 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -91,7 +90,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class HistogramIT extends ParameterizedOpenSearchIntegTestCase { +public class HistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; @@ -102,8 +101,8 @@ public class HistogramIT extends ParameterizedOpenSearchIntegTestCase { static long[] valueCounts, valuesCounts; static Map<Long, Map<String, Object>> expectedMultiSortBuckets; - public HistogramIT(Settings dynamicSettings) { - super(dynamicSettings); + public HistogramIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -114,11 +113,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java index 14a3685bd183e..44789ea63f536 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -45,7 +44,7 @@ import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.bucket.range.Range; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class IpRangeIT extends ParameterizedOpenSearchIntegTestCase { +public class IpRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public IpRangeIT(Settings dynamicSettings) { - super(dynamicSettings); + public IpRangeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class DummyScriptPlugin extends MockScriptPlugin { @Override public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java index c712c97af5c71..4d2da4fa1d14b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java @@ -51,8 +51,8 @@ public class IpTermsIT extends AbstractTermsTestCase { - public IpTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public IpTermsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java index 345cbdae8ef07..49031bfd3fc1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java @@ -86,8 +86,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class LongTermsIT extends AbstractTermsTestCase { - public LongTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public LongTermsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java index 90dafc0d57887..781d2acc5e2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java @@ -82,8 +82,8 @@ public class MinDocCountIT extends AbstractTermsTestCase { private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true); private static int cardinality; - public MinDocCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public MinDocCountIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java index ea5a59d89309f..09133f720f9f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -34,8 +34,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class MultiTermsIT extends BaseStringTermsTestCase { - public MultiTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiTermsIT(Settings staticSettings) { + super(staticSettings); } // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 6289cd5e36151..3eb813dcb91ef 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.Comparators; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -67,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NaNSortingIT extends ParameterizedOpenSearchIntegTestCase { +public class NaNSortingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private enum SubAggregation { AVG("avg") { @@ -139,8 +138,8 @@ public String sortKey() { public abstract double getValue(Aggregation aggregation); } - public NaNSortingIT(Settings dynamicSettings) { - super(dynamicSettings); + public NaNSortingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -151,11 +150,6 @@ public static Collection<Object[]> parameters() 
{ ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("string_value", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index 7af2ac218800d..288d4d2c4e525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -57,7 +56,7 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -92,14 +91,14 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NestedIT extends ParameterizedOpenSearchIntegTestCase { +public class NestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int numParents; private static int[] numChildren; private static SubAggCollectionMode aggCollectionMode; - public NestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public NestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -110,11 +109,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index 5812b7796c33e..50cee4e9ecd92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -79,15 +78,15 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class RangeIT extends ParameterizedOpenSearchIntegTestCase { +public class RangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; static int numDocs; - public RangeIT(Settings dynamicSettings) { - super(dynamicSettings); + public RangeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -98,11 +97,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java index 2716db6b7e745..3bf9233d3441d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; @@ -47,7 +46,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.ValueCount; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -76,10 +75,10 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ReverseNestedIT extends ParameterizedOpenSearchIntegTestCase { +public class ReverseNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ReverseNestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public ReverseNestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -90,11 +89,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index c7b03d21cb6bb..3decab92acbff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -71,7 +70,7 @@ * Tests the Sampler aggregation */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SamplerIT extends ParameterizedOpenSearchIntegTestCase { +public class SamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; @@ -79,8 +78,8 @@ public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } - public SamplerIT(Settings dynamicSettings) { - super(dynamicSettings); + public SamplerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index 66d761c56634e..4cab6deb08bb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.bucket.range.Range; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -77,10 +76,10 @@ * we can make sure that the reduce is properly propagated by checking that empty buckets were created. 
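 * A sketch of the request shape this suite drives (index and field names are
 * illustrative, not part of the change):
 *
 *   client().prepareSearch("idx")
 *       .setQuery(QueryBuilders.matchAllQuery())
 *       .addAggregation(
 *           dateHistogram("histo").field("date")
 *               .dateHistogramInterval(DateHistogramInterval.DAY)
 *               .minDocCount(0)
 *       )
 *       .get();
 *
 * With documents landing on different shards several days apart, the empty
 * in-between buckets only show up if the per-shard results are reduced
 * correctly.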
*/ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ShardReduceIT extends ParameterizedOpenSearchIntegTestCase { +public class ShardReduceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ShardReduceIT(Settings dynamicSettings) { - super(dynamicSettings); + public ShardReduceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") .setSource( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index 7c7cc12888307..66cce21bcf86f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -47,8 +47,8 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { - public ShardSizeTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ShardSizeTermsIT(Settings staticSettings) { + super(staticSettings); } public void testNoShardSizeString() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index e914b87754865..f2e9265fa5cf9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -65,7 +64,7 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ScriptHeuristic; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods; import java.io.IOException; @@ -95,14 +94,14 @@ import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SignificantTermsSignificanceScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class SignificantTermsSignificanceScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String INDEX_NAME = "testidx"; static final String TEXT_FIELD = "text"; static final String CLASS_FIELD = "class"; - public SignificantTermsSignificanceScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public 
SignificantTermsSignificanceScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(TestScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 343cea4b94c87..add6b71cb1753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -38,14 +38,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -67,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class TermsDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; private static final String LONG_FIELD_NAME = "l_value"; @@ -79,8 +78,8 @@ public static String randomExecutionHint() { private static int numRoutingValues; - public TermsDocCountErrorIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsDocCountErrorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java index 5ad913e8c7086..422af15d2881d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java @@ -14,27 +14,27 @@ import org.opensearch.action.search.SearchResponse; import 
org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.IndexSettings.MINIMUM_REFRESH_INTERVAL; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) -public class TermsFixedDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsFixedDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; - public TermsFixedDocCountErrorIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsFixedDocCountErrorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -45,11 +45,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleAggErrorMultiShard() throws Exception { // size = 1, shard_size = 2 // Shard_1 [A, A, A, A, B, B, C, C, D, D] -> Buckets {"A" : 4, "B" : 2} @@ -71,7 +66,10 @@ public void testSimpleAggErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -89,7 +87,10 @@ public void testSimpleAggErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -127,7 +128,10 @@ public void testSimpleAggErrorSingleShard() throws Exception { assertAcked( prepareCreate("idx_shard_error").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -170,7 +174,10 @@ public void testSliceLevelDocCountErrorSingleShard() throws Exception { assertAcked( prepareCreate("idx_slice_error").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -248,7 +255,10 @@ public void testSliceLevelDocCountErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); @@ -288,7 +298,10 @@ public void testSliceLevelDocCountErrorMultiShard() throws Exception { assertAcked( prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") .setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) ) ); client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 3851b16551795..1cc250c00dba9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.BucketOrder; @@ -44,7 +43,7 @@ import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -61,12 +60,12 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; 
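// The TermsFixedDocCountErrorIT hunks above pin index.refresh_interval to
// MINIMUM_REFRESH_INTERVAL so that refreshes are driven by the test rather than
// by a background schedule, keeping the segment layout deterministic for the
// doc-count-error assertions. A minimal sketch of that setup (index and field
// names are illustrative):
assertAcked(
    prepareCreate("idx_example").setMapping("s_value", "type=keyword")
        .setSettings(
            Settings.builder()
                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                // avoid background refreshes creating an unpredictable segment layout
                .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL)
        )
);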
import static org.hamcrest.Matchers.equalTo; -public class TermsShardMinDocCountIT extends ParameterizedOpenSearchIntegTestCase { +public class TermsShardMinDocCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String index = "someindex"; - public TermsShardMinDocCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public TermsShardMinDocCountIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java index 20caa4fd076fe..79aa4a648310a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java @@ -38,8 +38,8 @@ public class BaseStringTermsTestCase extends AbstractTermsTestCase { protected static final String MULTI_VALUED_FIELD_NAME = "s_values"; protected static Map<String, Map<String, Object>> expectedMultiSortBuckets; - public BaseStringTermsTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public BaseStringTermsTestCase(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index 8c727d280ec52..edf9cd432dda2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -79,8 +79,8 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class StringTermsIT extends BaseStringTermsTestCase { - public StringTermsIT(Settings dynamicSettings) { - super(dynamicSettings); + public StringTermsIT(Settings staticSettings) { + super(staticSettings); } // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index 9ebec21367164..db4ee3571d141 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -48,7 +47,7 @@ import 
org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -71,10 +70,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class CardinalityIT extends ParameterizedOpenSearchIntegTestCase { +public class CardinalityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CardinalityIT(Settings dynamicSettings) { - super(dynamicSettings); + public CardinalityIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 94756f3fe9f99..8122304ba992c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -38,12 +38,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.BucketOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -54,10 +53,10 @@ import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; -public class CardinalityWithRequestBreakerIT extends ParameterizedOpenSearchIntegTestCase { +public class CardinalityWithRequestBreakerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CardinalityWithRequestBreakerIT(Settings dynamicSettings) { - super(dynamicSettings); + public CardinalityWithRequestBreakerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Test that searches using cardinality aggregations return all request breaker memory. 
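 * A rough sketch of the kind of request this exercises (index and field names
 * are illustrative, not part of the change):
 *
 *   client().prepareSearch("idx")
 *       .addAggregation(
 *           terms("terms").field("str_value")
 *               .subAggregation(cardinality("cardinality").field("str_value"))
 *       )
 *       .get();
 *
 * Whether the request completes or trips a CircuitBreakingException, the
 * request circuit breaker should end up back at zero reserved bytes.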
*/ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java index 3d804b9aa626e..4a2c100690de4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -70,8 +70,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { - public ExtendedStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExtendedStatsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index 78100d1778ecf..ed87fa6d8f5f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -55,8 +55,8 @@ public class GeoCentroidIT extends AbstractGeoTestCase { private static final String aggName = "geoCentroid"; - public GeoCentroidIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoCentroidIT(Settings staticSettings) { + super(staticSettings); } public void testEmptyAggregation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 7ca5130388eea..ae67f0b1c0b66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -72,8 +72,8 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { - public HDRPercentileRanksIT(Settings dynamicSettings) { - super(dynamicSettings); + public HDRPercentileRanksIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java index ec913b3e130f5..ff1cab85c18e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -75,8 +75,8 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { - public HDRPercentilesIT(Settings dynamicSettings) { - super(dynamicSettings); + public HDRPercentilesIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index b8447d682abae..0edba475a6401 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -91,8 +91,8 @@ public class 
MedianAbsoluteDeviationIT extends AbstractNumericTestCase { private static double singleValueExactMAD; private static double multiValueExactMAD; - public MedianAbsoluteDeviationIT(Settings dynamicSettings) { - super(dynamicSettings); + public MedianAbsoluteDeviationIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java index ced2358ac3f78..1725aa7847d72 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -56,7 +55,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -93,12 +92,12 @@ @ClusterScope(scope = Scope.SUITE) @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ScriptedMetricIT extends ParameterizedOpenSearchIntegTestCase { +public class ScriptedMetricIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static long numDocs; - public ScriptedMetricIT(Settings dynamicSettings) { - super(dynamicSettings); + public ScriptedMetricIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java index f957a74eeb9d0..3708e1e6ab21b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java @@ -66,8 +66,8 @@ import static org.hamcrest.Matchers.sameInstance; public class StatsIT extends AbstractNumericTestCase { - public StatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public StatsIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java index 382d656448114..b2aa3438b2306 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java @@ -68,8 +68,8 @@ public class SumIT extends AbstractNumericTestCase { - public SumIT(Settings dynamicSettings) { - super(dynamicSettings); + public SumIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 941d3a888db29..4225c027c4d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -72,8 +72,8 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { - public TDigestPercentileRanksIT(Settings dynamicSettings) { - super(dynamicSettings); + public TDigestPercentileRanksIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java index 6457cf9307fa1..974e90fab16e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -74,8 +74,8 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { - public TDigestPercentilesIT(Settings dynamicSettings) { - super(dynamicSettings); + public TDigestPercentilesIT(Settings staticSettings) { + super(staticSettings); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index 10e51079cf389..5d84452998e40 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -42,7 +42,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.common.document.DocumentField; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -70,7 +69,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -111,13 +110,13 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase() -public class TopHitsIT extends ParameterizedOpenSearchIntegTestCase { +public class TopHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TERMS_AGGS_FIELD = "terms"; private static final String SORT_FIELD = "sort"; - public TopHitsIT(Settings dynamicSettings) { - super(dynamicSettings); + public TopHitsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -128,11 +127,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java index 833d1ce3bb4c3..4610281c4b8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java @@ -35,7 +35,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -45,7 +44,7 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -73,10 +72,10 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ValueCountIT extends ParameterizedOpenSearchIntegTestCase { +public class ValueCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ValueCountIT(Settings dynamicSettings) { - super(dynamicSettings); + public ValueCountIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -87,11 +86,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java index bec9203384026..48fd06bac285b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AvgBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class AvgBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class AvgBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public AvgBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public AvgBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java index 4c3129eb89e3b..1b22cf2018d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; 
import java.io.IOException; import java.util.ArrayList; @@ -76,7 +75,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketScriptIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -90,8 +89,8 @@ public class BucketScriptIT extends ParameterizedOpenSearchIntegTestCase { private static int maxNumber; private static long date; - public BucketScriptIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketScriptIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -102,11 +101,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java index a7b28add7373a..7dca1d0d79b1e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,7 +49,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -77,7 +76,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSelectorIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketSelectorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -89,8 +88,8 @@ public class BucketSelectorIT extends ParameterizedOpenSearchIntegTestCase { private static int minNumber; private static int maxNumber; - public BucketSelectorIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketSelectorIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -101,11 +100,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java index 2e4fd7a412118..ffb607866935b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -48,7 +47,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.time.ZonedDateTime; @@ -75,7 +74,7 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSortIT extends ParameterizedOpenSearchIntegTestCase { +public class BucketSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX = "bucket-sort-it-data-index"; private static final String INDEX_WITH_GAPS = "bucket-sort-it-data-index-with-gaps"; @@ -85,8 +84,8 @@ public class BucketSortIT extends ParameterizedOpenSearchIntegTestCase { private static final String VALUE_1_FIELD = "value_1"; private static final String VALUE_2_FIELD = "value_2"; - public BucketSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public BucketSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex(INDEX, INDEX_WITH_GAPS); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java index b05ff7b4329cd..8c89c1232ebb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -39,7 +39,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -48,7 +47,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.support.AggregationPath; import 
org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -76,15 +75,15 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateDerivativeIT extends ParameterizedOpenSearchIntegTestCase { +public class DateDerivativeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { // some index names used during these tests private static final String IDX_DST_START = "idx_dst_start"; private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; - public DateDerivativeIT(Settings dynamicSettings) { - super(dynamicSettings); + public DateDerivativeIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 41bbffc13658b..f8def40ec003a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -51,7 +50,7 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -78,7 +77,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DerivativeIT extends ParameterizedOpenSearchIntegTestCase { +public class DerivativeIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -112,11 +111,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 299827e2413d4..1bd04cc13268f 
100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -49,7 +48,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ExtendedStatsBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class ExtendedStatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -80,8 +79,8 @@ public class ExtendedStatsBucketIT extends ParameterizedOpenSearchIntegTestCase static int numValueBuckets; static long[] valueCounts; - public ExtendedStatsBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExtendedStatsBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index dc3b690c7f78f..ea6fcbd6a1560 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -58,7 +57,7 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -80,7 +79,7 @@ import static org.hamcrest.core.IsNull.notNullValue; 
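
Note: the hunks in this stretch of the diff repeat one mechanical migration. Each suite moves from ParameterizedOpenSearchIntegTestCase to ParameterizedStaticSettingsOpenSearchIntegTestCase, renames the constructor parameter from dynamicSettings to staticSettings, and deletes the featureFlagSettings() override that force-enabled FeatureFlags.CONCURRENT_SEGMENT_SEARCH, since concurrent segment search is now exercised through the parameter matrix rather than a feature flag. The parameters() bodies appear here only as elided diff context, so the skeleton below is an illustrative sketch: the class name ExampleMetricIT is hypothetical, and the two-entry settings matrix is assumed from the static import of CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING visible elsewhere in this diff.

import java.util.Arrays;
import java.util.Collection;

import org.opensearch.common.settings.Settings;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

// Hypothetical skeleton showing the migrated shape; not a file from this diff.
@OpenSearchIntegTestCase.SuiteScopeTestCase
public class ExampleMetricIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    // The settings are fixed per test instantiation (applied when the test
    // cluster's nodes start), hence the rename to staticSettings.
    public ExampleMetricIT(Settings staticSettings) {
        super(staticSettings);
    }

    // Runs every test twice: concurrent segment search disabled and enabled.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }
}
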
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class MaxBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class MaxBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -91,8 +90,8 @@ public class MaxBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public MaxBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public MaxBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -103,11 +102,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index 189bfd9b5b80a..44d12436382f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MinBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class MinBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class MinBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public MinBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public MinBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 8ad3107ac33ac..d35b80b7918fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -43,14 +43,13 @@ import org.opensearch.client.Client; import org.opensearch.common.collect.EvictingQueue; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -77,7 +76,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MovAvgIT extends ParameterizedOpenSearchIntegTestCase { +public class MovAvgIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; private static final String VALUE_FIELD2 = "v_value2"; @@ -133,8 +132,8 @@ public String toString() { } } - public MovAvgIT(Settings dynamicSettings) { - super(dynamicSettings); + public MovAvgIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -145,11 +144,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { prepareCreate("idx").setMapping( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java index 580497715ed6d..29cb334bfcd00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -47,7 +46,7 @@ import org.opensearch.search.aggregations.metrics.Percentile; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; 
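
One deliberate exception to the pattern above: DerivativeIT moves to ParameterizedDynamicSettingsOpenSearchIntegTestCase instead, and, consistent with that, its constructor keeps the dynamicSettings parameter name (no constructor hunk appears in its diff). Assuming the semantics the class names imply — the static variant bakes the parameterized settings into the nodes at startup, while the dynamic variant applies them through cluster-settings updates around the tests — a sketch of that shape:

import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase;

// Hypothetical shape for the one suite kept on the dynamic variant; the
// base-class behavior described above is inferred from its name, not this diff.
public class ExampleDerivativeIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase {
    public ExampleDerivativeIT(Settings dynamicSettings) {
        super(dynamicSettings); // applied via the cluster settings API, not at node startup
    }
}

The distinction only matters for dynamically updatable settings, which the concurrent segment search setting is; most suites here stay on the static variant.
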
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class PercentilesBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class PercentilesBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; @@ -80,8 +79,8 @@ public class PercentilesBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public PercentilesBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public PercentilesBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -92,11 +91,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index b4da63802bc50..507bff51f0e39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -38,12 +38,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.collect.EvictingQueue; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -69,7 +68,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SerialDiffIT extends ParameterizedOpenSearchIntegTestCase { +public class SerialDiffIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -98,8 +97,8 @@ public String toString() { } } - public SerialDiffIT(Settings dynamicSettings) { - super(dynamicSettings); + public SerialDiffIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -110,11 +109,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private ValuesSourceAggregationBuilder<? 
extends ValuesSourceAggregationBuilder<?>> randomMetric(String name, String field) { int rand = randomIntBetween(0, 3); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java index 21fdd5e761e77..fbaf799871c8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class StatsBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class StatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; static int numDocs; @@ -76,8 +75,8 @@ public class StatsBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public StatsBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public StatsBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java index d4bd8f21b2a99..a5967124ff921 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -46,7 +45,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; 
import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SumBucketIT extends ParameterizedOpenSearchIntegTestCase { +public class SumBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -77,8 +76,8 @@ public class SumBucketIT extends ParameterizedOpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; - public SumBucketIT(Settings dynamicSettings) { - super(dynamicSettings); + public SumBucketIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -89,11 +88,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 28ada82a1c56b..fb84134120e00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -21,7 +21,6 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -37,7 +36,7 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.hamcrest.MatcherAssert; @@ -61,13 +60,13 @@ import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchBackpressureIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchBackpressureIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); private static final int MOVING_AVERAGE_WINDOW_SIZE = 10; - public SearchBackpressureIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchBackpressureIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index bd623ccdf2731..ad1ce0582cfb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -42,11 +42,10 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchService; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import java.util.Arrays; @@ -61,10 +60,10 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchRedStateIndexIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchRedStateIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchRedStateIndexIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchRedStateIndexIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; buildRedIndex(numShards); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index a5989b693d332..681f7081fa2dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -39,9 +39,8 @@ import org.opensearch.client.Client; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -54,10 +53,10 @@ * This test basically verifies that search with a single shard active (cause we indexed to it) and other * shards possibly not active at all (cause they haven't allocated) will still work. 
*/ -public class SearchWhileCreatingIndexIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWhileCreatingIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWhileCreatingIndexIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWhileCreatingIndexIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -68,11 +67,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testIndexCausesIndexCreation() throws Exception { searchWhileCreatingIndex(false, 1); // 1 replica in our default... } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java index 6d2ec845afa98..f7b8b0df7dca7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java @@ -40,10 +40,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchWhileRelocatingIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWhileRelocatingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWhileRelocatingIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWhileRelocatingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSearchAndRelocateConcurrentlyRandomReplicas() throws Exception { testSearchAndRelocateConcurrently(randomIntBetween(0, 1)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java index aa82b9d21c7fb..614ec2ebd634a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java @@ -49,14 +49,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import 
org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.engine.ThrowingLeafReaderWrapper; @@ -71,10 +70,10 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class SearchWithRandomExceptionsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWithRandomExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWithRandomExceptionsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWithRandomExceptionsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(RandomExceptionDirectoryReaderWrapper.TestPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java index 446a0bce58d66..b45b334fc1d1c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -45,13 +45,12 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.store.MockFSDirectoryFactory; import org.opensearch.test.store.MockFSIndexStore; @@ -64,10 +63,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -public class SearchWithRandomIOExceptionsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchWithRandomIOExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchWithRandomIOExceptionsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchWithRandomIOExceptionsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(MockFSIndexStore.TestPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java index cbe52abf5279b..0e337822ba0e7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java @@ -44,11 +44,10 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -64,10 +63,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class TransportSearchFailuresIT extends ParameterizedOpenSearchIntegTestCase { +public class TransportSearchFailuresIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public TransportSearchFailuresIT(Settings dynamicSettings) { - super(dynamicSettings); + public TransportSearchFailuresIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int maximumNumberOfReplicas() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index edceb0cbc0d24..a82b6f12755ca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -54,7 +53,7 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -80,10 +79,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class TransportTwoNodesSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class TransportTwoNodesSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public TransportTwoNodesSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public 
TransportTwoNodesSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -94,11 +93,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 87f2153eb800f..13b4abb58b4df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -42,7 +42,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -54,7 +53,7 @@ import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -74,10 +73,10 @@ import static org.hamcrest.CoreMatchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2) -public class FetchSubPhasePluginIT extends ParameterizedOpenSearchIntegTestCase { +public class FetchSubPhasePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FetchSubPhasePluginIT(Settings dynamicSettings) { - super(dynamicSettings); + public FetchSubPhasePluginIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index 1a730c01e4890..b743c00bf4549 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; @@ -58,7 +57,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -88,10 +87,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class InnerHitsIT extends ParameterizedOpenSearchIntegTestCase { +public class InnerHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public InnerHitsIT(Settings dynamicSettings) { - super(dynamicSettings); + public InnerHitsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -102,11 +101,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index 83cedb8c20e1d..a1adc6f99b92a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentHelper; @@ -45,7 +44,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -62,12 +61,14 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.hasKey; -public class MatchedQueriesIT extends ParameterizedOpenSearchIntegTestCase { +public class MatchedQueriesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MatchedQueriesIT(Settings dynamicSettings) { - super(dynamicSettings); + public MatchedQueriesIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -78,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); @@ -101,15 +97,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .should(rangeQuery("number").gte(2).queryName("test2")) ) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -119,15 +118,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .setQuery( 
boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else if (hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -153,12 +155,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -174,12 +179,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -203,9 +211,11 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex assertHitCount(searchResponse, 3L); for 
(SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -237,13 +247,15 @@ public void testRegExpQuerySupportsName() throws InterruptedException { SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("regex")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex")); + assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -258,15 +270,17 @@ public void testPrefixQuerySupportsName() throws InterruptedException { refresh(); indexRandomForConcurrentSearch("test1"); - SearchResponse searchResponse = client().prepareSearch() + var query = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) - .get(); + .setIncludeNamedQueriesScore(true); + var searchResponse = query.get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("prefix")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix")); + assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -288,8 +302,9 @@ public void testFuzzyQuerySupportsName() throws InterruptedException { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("fuzzy")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy")); + assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -306,13 +321,15 @@ public void testWildcardQuerySupportsName() throws InterruptedException { SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("wildcard")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + 
assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard")); + assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -334,8 +351,9 @@ public void testSpanFirstQuerySupportsName() throws InterruptedException { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("span")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("span")); + assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -369,11 +387,13 @@ public void testMatchedWithShould() throws Exception { assertHitCount(searchResponse, 2L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("dolor")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor")); + assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f)); } else if (hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("elit")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit")); + assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -397,7 +417,10 @@ public void testMatchedWithWrapperQuery() throws Exception { for (QueryBuilder query : queries) { SearchResponse searchResponse = client().prepareSearch().setQuery(query).get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("abc")); + SearchHit hit = searchResponse.getHits().getAt(0); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc")); + assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index fe17c3e22d43c..66cbf36137551 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -35,12 +35,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -57,10 +56,10 @@ * Integration test for highlighters registered by a plugin. 
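The MatchedQueriesIT hunks above replace the array-based SearchHit.getMatchedQueries() with a map from query name to score, opted into per request via setIncludeNamedQueriesScore(true). A minimal sketch of the new read path, assuming the Map<String, Float> shape implied by the hasKey/size assertions; the index, field, and query names are illustrative, and testNamedQueryScores is a hypothetical test method in an OpenSearchIntegTestCase subclass:

    import static org.opensearch.index.query.QueryBuilders.boolQuery;
    import static org.opensearch.index.query.QueryBuilders.rangeQuery;
    import static org.hamcrest.Matchers.greaterThanOrEqualTo;

    import java.util.Map;

    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.search.SearchHit;

    public void testNamedQueryScores() throws Exception {
        SearchResponse response = client().prepareSearch("test")
            .setQuery(
                boolQuery().should(rangeQuery("number").lt(2).queryName("test1"))
                    .should(rangeQuery("number").gte(2).queryName("test2"))
            )
            .setIncludeNamedQueriesScore(true) // opt in to per-query scores
            .get();

        for (SearchHit hit : response.getHits()) {
            // Maps each named query that matched this hit to the score it contributed.
            Map<String, Float> matched = hit.getMatchedQueriesAndScores();
            for (String name : matched.keySet()) {
                assertThat(hit.getMatchedQueryScore(name), greaterThanOrEqualTo(0f));
            }
        }
    }

The QueryRescorerIT hunk further down leans on the same bookkeeping: it names the rescored query "hello-world" and verifies that the name survives rescoring through the new hasMatchedQueries assertion.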
*/ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class CustomHighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class CustomHighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public CustomHighlighterSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public CustomHighlighterSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomHighlighterPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 2afa911223074..f449a91a57279 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Analyzer; @@ -41,6 +42,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.analysis.MockTokenizer; +import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; @@ -49,7 +51,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; @@ -75,7 +76,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -128,13 +129,15 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class HighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { +// Higher timeout to accommodate large number of tests in this class. 
See https://github.com/opensearch-project/OpenSearch/issues/12119 +@TimeoutSuite(millis = 35 * TimeUnits.MINUTE) +public class HighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { // TODO as we move analyzers out of the core we need to move some of these into HighlighterWithAnalyzersTests private static final String[] ALL_TYPES = new String[] { "plain", "fvh", "unified" }; - public HighlighterSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public HighlighterSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -145,11 +148,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index f5d1b8234558e..4d398f8ca09cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -38,13 +38,12 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -59,10 +58,10 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class FieldCapabilitiesIT extends ParameterizedOpenSearchIntegTestCase { +public class FieldCapabilitiesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FieldCapabilitiesIT(Settings dynamicSettings) { - super(dynamicSettings); + public FieldCapabilitiesIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -73,11 +72,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setUp() throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index ed8fe74504f92..906d45ef84b3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import 
org.opensearch.common.time.DateUtils; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; @@ -63,7 +62,7 @@ import org.opensearch.search.lookup.FieldLookup; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -104,10 +103,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class SearchFieldsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchFieldsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchFieldsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchFieldsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -118,11 +117,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 3a6624c2ad2e6..0380b3c7ddb89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -46,7 +46,6 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.FunctionScoreQuery.ScoreMode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -56,7 +55,7 @@ import org.opensearch.search.MultiValueMode; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.time.ZoneOffset; @@ -91,10 +90,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -public class DecayFunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class DecayFunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public DecayFunctionScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public DecayFunctionScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean 
forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java index 62d0d89c644a5..0573dcfc4863d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -58,7 +57,7 @@ import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -83,10 +82,10 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class ExplainableScriptIT extends ParameterizedOpenSearchIntegTestCase { +public class ExplainableScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ExplainableScriptIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExplainableScriptIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class ExplainableScriptPlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index d53f55b98bd23..6956833cf6d62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -38,9 +38,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -61,10 +60,10 @@ /** * Tests for the {@code field_value_factor} function in a function_score query. 
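Every suite in this patch follows the same mechanical migration: extend ParameterizedStaticSettingsOpenSearchIntegTestCase, rename the constructor argument from dynamicSettings to staticSettings, and drop the featureFlagSettings() override, since concurrent segment search is now exercised through the suite parameters rather than a feature flag. The parameters() bodies are elided from the hunks, so the sketch below is a plausible reconstruction from the CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING static import these files share, not a verbatim copy; ExampleSearchIT is a hypothetical class name:

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import java.util.Arrays;
    import java.util.Collection;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    public class ExampleSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

        public ExampleSearchIT(Settings staticSettings) {
            super(staticSettings);
        }

        // Runs the whole suite twice: concurrent segment search disabled, then enabled.
        @ParametersFactory
        public static Collection<Object[]> parameters() {
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }
    }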
*/ -public class FunctionScoreFieldValueIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScoreFieldValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FunctionScoreFieldValueIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScoreFieldValueIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testFieldValueFactor() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 3b80d437e95c0..4f267f0059291 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -39,7 +39,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; @@ -50,7 +49,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -78,13 +77,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class FunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String TYPE = "type"; static final String INDEX = "index"; - public FunctionScoreIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScoreIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index a91f53dae04d2..593f844305743 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -40,7 +40,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.query.functionscore.DecayFunction; @@ -52,7 +51,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -71,10 +70,10 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class FunctionScorePluginIT extends ParameterizedOpenSearchIntegTestCase { +public class FunctionScorePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public FunctionScorePluginIT(Settings dynamicSettings) { - super(dynamicSettings); + public FunctionScorePluginIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index bda6284d9535a..5121d5023fd95 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -43,7 +43,6 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.Operator; @@ -55,7 +54,7 @@ import org.opensearch.search.rescore.QueryRescoreMode; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -84,6 +83,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSecondHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasMatchedQueries; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -91,10 +91,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -public class QueryRescorerIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryRescorerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public QueryRescorerIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryRescorerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -105,11 +105,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testEnforceWindowSize() throws InterruptedException { createIndex("test"); // this @@ -600,7 +595,7 @@ public void testExplain() throws Exception { SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR).queryName("hello-world")) .setRescorer(innerRescoreQuery, 5) .setExplain(true) .get(); @@ -608,7 +603,10 @@ public void testExplain() throws Exception { assertFirstHit(searchResponse, hasId("1")); assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - + final String[] matchedQueries = { "hello-world" }; + assertFirstHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertSecondHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertThirdHit(searchResponse, hasMatchedQueries(matchedQueries)); for (int j = 0; j < 3; 
j++) { assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode])); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 69e30fc879dd8..f1205ba0f1e93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -36,7 +36,6 @@ import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -47,7 +46,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.CoreMatchers; import java.util.Arrays; @@ -76,10 +75,10 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -public class RandomScoreFunctionIT extends ParameterizedOpenSearchIntegTestCase { +public class RandomScoreFunctionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public RandomScoreFunctionIT(Settings dynamicSettings) { - super(dynamicSettings); + public RandomScoreFunctionIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -90,11 +89,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index ba519be04edff..701ff0a94baf2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -56,7 +56,6 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.PolygonBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.Streams; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; @@ -64,7 +63,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import org.junit.BeforeClass; @@ -99,10 +98,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class GeoFilterIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoFilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoFilterIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoFilterIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java index 85cb087585d31..2010a288427b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java @@ -39,10 +39,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -60,10 +59,10 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoPolygonIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoPolygonIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoPolygonIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoPolygonIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -74,11 +73,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return 
Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java index 1f9b6ae434f75..6dbffa019382d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java @@ -41,14 +41,13 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -61,10 +60,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class GeoShapeIntegrationIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoShapeIntegrationIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoShapeIntegrationIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -75,11 +74,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java index d21d6036c9673..e9115cf7dfbce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -50,7 +49,7 @@ import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -63,10 +62,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class LegacyGeoShapeIntegrationIT 
extends ParameterizedOpenSearchIntegTestCase { +public class LegacyGeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public LegacyGeoShapeIntegrationIT(Settings dynamicSettings) { - super(dynamicSettings); + public LegacyGeoShapeIntegrationIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -77,11 +76,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /** * Test that orientation parameter correctly persists across cluster restart */ diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index dc7c4e687c2fa..36fc5de0a5cf7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder; @@ -50,7 +49,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -79,10 +78,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; -public class MoreLikeThisIT extends ParameterizedOpenSearchIntegTestCase { +public class MoreLikeThisIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MoreLikeThisIT(Settings dynamicSettings) { - super(dynamicSettings); + public MoreLikeThisIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -93,11 +92,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index b35208941d2a2..9f49b7a27cda4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -37,10 +37,9 @@ import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -52,10 +51,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.hamcrest.Matchers.equalTo; -public class MultiSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiSearchIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiSearchIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java index 71f82d7c0b412..a6554271a0bc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -31,11 +30,6 @@ */ public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - /* * Tests the explain output for multiple docs. 
Concurrent search with multiple slices is tested * here as call to indexRandomForMultipleSlices is made and compared with explain output for diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 8eeffcbecb377..19e38da1aed05 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -47,7 +47,6 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -56,7 +55,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortMode; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -77,10 +76,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class SimpleNestedIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SimpleNestedIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleNestedIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index a3432bfe7e3e4..8bea5ef97fbba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -32,12 +32,11 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -67,7 +66,7 @@ * Multi node integration tests for PIT creation and search operation with PIT ID. 
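PitMultiNodeIT only changes its base class here, but it keeps the PointInTimeBuilder and TimeValue imports that drive the PIT flow. A hedged sketch of that flow as it appears in OpenSearch 2.x test code; the index name and keep-alive values are illustrative, and the CreatePitAction/CreatePitRequest usage is an assumption based on this suite's imports rather than a line quoted from the patch:

    import org.opensearch.action.search.CreatePitAction;
    import org.opensearch.action.search.CreatePitRequest;
    import org.opensearch.action.search.CreatePitResponse;
    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.common.unit.TimeValue;
    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.search.builder.PointInTimeBuilder;

    // Hypothetical test method inside an OpenSearchIntegTestCase subclass.
    public void testSearchWithPit() throws Exception {
        CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(10), true);
        request.setIndices(new String[] { "index" });
        CreatePitResponse pitResponse = client().execute(CreatePitAction.INSTANCE, request).get();

        // Search against the frozen point-in-time view rather than the live index.
        SearchResponse response = client().prepareSearch()
            .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueMinutes(10)))
            .setQuery(QueryBuilders.matchAllQuery())
            .get();
    }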
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeIT extends ParameterizedOpenSearchIntegTestCase { +public class PitMultiNodeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public PitMultiNodeIT(Settings settings) { super(settings); } @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Before public void setupIndex() throws ExecutionException, InterruptedException { createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 6e40c08ed08a1..bc9eeb528b031 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -44,13 +44,12 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -70,10 +69,10 @@ import static org.hamcrest.Matchers.not; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchPreferenceIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchPreferenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchPreferenceIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchPreferenceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java index 82dd6225fda4e..2f608a0cbe06f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import 
org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; @@ -50,7 +49,7 @@ import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.QueryProfileShardResult; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.core.IsNull; import java.util.ArrayList; @@ -83,7 +82,7 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationProfilerIT extends ParameterizedOpenSearchIntegTestCase { +public class AggregationProfilerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String BUILD_LEAF_COLLECTOR = AggregationTimingType.BUILD_LEAF_COLLECTOR.toString(); private static final String COLLECT = AggregationTimingType.COLLECT.toString(); @@ -166,8 +165,8 @@ public class AggregationProfilerIT extends ParameterizedOpenSearchIntegTestCase private static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; private static final String REASON_AGGREGATION = "aggregation"; - public AggregationProfilerIT(Settings dynamicSettings) { - super(dynamicSettings); + public AggregationProfilerIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -178,11 +177,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index ef73438114079..412a94aaf1b3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -42,14 +42,13 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -68,7 +67,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryProfilerIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private final boolean concurrentSearchEnabled; private static final String MAX_PREFIX = "max_"; private static final String MIN_PREFIX = "min_"; @@ -88,11 +87,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, 
"true").build(); - } - /** * This test simply checks to make sure nothing crashes. Test indexes 100-150 documents, * constructs 20-100 random queries and tries to profile them diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java index e3253ea583ac2..b95542382e5fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java @@ -38,13 +38,12 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -62,10 +61,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class ExistsIT extends ParameterizedOpenSearchIntegTestCase { +public class ExistsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ExistsIT(Settings dynamicSettings) { - super(dynamicSettings); + public ExistsIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -76,11 +75,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - // TODO: move this to a unit test somewhere... 
public void testEmptyIndex() throws Exception { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java index 457114bac33b8..392f8b036b7a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java @@ -39,7 +39,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; @@ -54,7 +53,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -92,10 +91,10 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; -public class MultiMatchQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class MultiMatchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MultiMatchQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public MultiMatchQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -106,11 +105,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(MockKeywordPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 1ca5859f23bca..c43a9c23661ea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -47,7 +46,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import org.junit.BeforeClass; @@ -70,12 +69,12 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class QueryStringIT extends ParameterizedOpenSearchIntegTestCase { +public class QueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; - public QueryStringIT(Settings dynamicSettings) { - super(dynamicSettings); + public QueryStringIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index 55029712a061c..136ddce152f63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.RangeQueryBuilder; @@ -46,7 +45,7 @@ import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -67,10 +66,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; -public class ScriptScoreQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class ScriptScoreQueryIT extends 
ParameterizedStaticSettingsOpenSearchIntegTestCase { - public ScriptScoreQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public ScriptScoreQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -81,11 +80,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 03312c6e1e2f7..a58db51780826 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -51,7 +51,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -80,7 +79,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestIssueLogging; import java.io.IOException; @@ -147,10 +146,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class SearchQueryIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchQueryIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchQueryIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -161,11 +160,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockAnalysisPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index d8902238005da..31678d3f018a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -43,7 +43,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -60,7 +59,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.BeforeClass; import java.io.IOException; @@ -95,12 +94,12 @@ /** * Tests for the {@code simple_query_string} query */ -public class SimpleQueryStringIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleQueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; - public SimpleQueryStringIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleQueryStringIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -111,11 +110,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index ae00904f237a5..7dbc61a3ced39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; @@ -50,7 +49,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -71,7 +70,7 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class ScriptQuerySearchIT 
extends ParameterizedOpenSearchIntegTestCase { +public class ScriptQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ScriptQuerySearchIT(Settings settings) { super(settings); } @@ -84,11 +83,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java index c7a6d18f881c6..55b3cfeef7419 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java @@ -40,14 +40,13 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -60,7 +59,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -public class DuelScrollIT extends ParameterizedOpenSearchIntegTestCase { +public class DuelScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public DuelScrollIT(Settings settings) { super(settings); } @@ -73,11 +72,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index b82048ffc924e..35b5a7949b20b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -60,7 +59,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.After; @@ -92,7 +91,7 @@ /** * Tests for scrolling. */ -public class SearchScrollIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SearchScrollIT(Settings settings) { super(settings); } @@ -105,11 +104,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @After public void cleanup() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index 27002b844da1d..38f65c8c2d0da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -39,9 +39,8 @@ import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -58,7 +57,7 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 0) -public class SearchScrollWithFailingNodesIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchScrollWithFailingNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SearchScrollWithFailingNodesIT(Settings settings) { super(settings); } @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { return 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index b99f66850e9e3..13c510ff21338 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -47,12 +47,11 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -69,7 +68,7 @@ 
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class SearchAfterIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchAfterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX_NAME = "test"; private static final int NUM_DOCS = 100; @@ -85,11 +84,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testsShouldFail() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 7aae41d939cac..98e749aa48cac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.rest.RestStatus; @@ -48,12 +47,14 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -77,7 +78,7 @@ import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; -public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SimpleSearchIT(Settings settings) { super(settings); @@ -91,11 +92,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSearchNullIndex() { expectThrows( NullPointerException.class, @@ -305,7 +301,15 @@ public void dotestSimpleTerminateAfterCountWithSize(int size, int max) throws Ex .setSize(size) .setTrackTotalHits(true) .get(); - assertHitCount(searchResponse, i); + + // Do not expect an exact match as an optimization introduced by https://issues.apache.org/jira/browse/LUCENE-10620 + // can produce a total hit count > terminate_after, but this only kicks in + // when size = 0, which is when TotalHitCountCollector is used.
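The range check referenced in the comment above uses a three-argument assertHitCount overload. The call sites in this hunk imply roughly the following shape; this is a sketch for illustration only, assuming the real helper sits alongside the two-argument version in org.opensearch.test.hamcrest.OpenSearchAssertions and may differ in details:

    import org.opensearch.action.search.SearchResponse;
    import static org.junit.Assert.assertTrue;

    // Assumed shape of the range-check overload used below: passes when the
    // reported total hit count falls anywhere in [minHitCount, maxHitCount].
    public static void assertHitCount(SearchResponse response, long minHitCount, long maxHitCount) {
        final long hits = response.getHits().getTotalHits().value;
        assertTrue(
            "hit count " + hits + " not in [" + minHitCount + ", " + maxHitCount + "]",
            minHitCount <= hits && hits <= maxHitCount
        );
    }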
+ if (size == 0) { + assertHitCount(searchResponse, i, max); + } else { + assertHitCount(searchResponse, i); + } assertTrue(searchResponse.isTerminatedEarly()); assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } @@ -319,7 +323,6 @@ public void dotestSimpleTerminateAfterCountWithSize(int size, int max) throws Ex assertFalse(searchResponse.isTerminatedEarly()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") public void testSimpleTerminateAfterCountSize0() throws Exception { int max = randomIntBetween(3, 29); dotestSimpleTerminateAfterCountWithSize(0, max); } @@ -330,6 +333,24 @@ public void testSimpleTerminateAfterCountRandomSize() throws Exception { dotestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max); } + /** + * Special cases when size = 0: + * + * If track_total_hits = true: + * The Weight#count optimization can cause totalHits in the response to be up to the total doc count regardless of terminate_after. + * So, we will have to do a range check, not an equality check. + * + * If track_total_hits != true but is set to a value, AND terminate_after is set: + * Again, due to the optimization, any count can be returned. + * Up to terminate_after, relation == EQUAL_TO. + * But if track_total_hits_up_to >= terminate_after, relation can be EQ or GTE. + * This ambiguity arises because either totalHits == track_total_hits_up_to, + * or totalHits > track_total_hits_up_to and SearchPhaseController sets totalHits = track_total_hits_up_to when returning results, + * in which case relation = GTE. + * + * @param size the size to use for the search requests + * @throws Exception + */ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); @@ -346,6 +367,7 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except refresh(); SearchResponse searchResponse; + searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) .setTerminateAfter(10) .setSize(size) .setTrackTotalHitsUpTo(5) .get(); @@ -356,25 +378,28 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except assertEquals(5, searchResponse.getHits().getTotalHits().value); assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) - .setTerminateAfter(5) - .setSize(size) - .setTrackTotalHitsUpTo(10) - .get(); - assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); - assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + // For size = 0, the following queries terminate early, but hits and relation can vary.
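Concretely, the special cases documented in the javadoc above reduce to assertions like the following sketch. It reuses the test's own index, field, and numDocs, plus the three-argument assertHitCount overload sketched earlier; with size = 0 the TotalHitCountCollector path lets Weight#count answer the query without visiting documents one by one, so only a range check is safe:

    // terminate_after = 5, size = 0: early termination is still reported, but
    // the total may land anywhere between terminate_after and the full doc count.
    SearchResponse searchResponse = client().prepareSearch("test")
        .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs))
        .setTerminateAfter(5)
        .setSize(0)
        .setTrackTotalHits(true)
        .get();
    assertTrue(searchResponse.isTerminatedEarly());
    assertHitCount(searchResponse, 5, numDocs); // range check, not equality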
+ if (size > 0) { + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) - .setTerminateAfter(5) - .setSize(size) - .setTrackTotalHitsUpTo(5) - .get(); - assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); - assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) @@ -383,7 +408,12 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except .setTrackTotalHits(true) .get(); assertTrue(searchResponse.isTerminatedEarly()); - assertEquals(5, searchResponse.getHits().getTotalHits().value); + if (size == 0) { + // Since terminate_after < track_total_hits, we need to do a range check. + assertHitCount(searchResponse, 5, numDocs); + } else { + assertEquals(5, searchResponse.getHits().getTotalHits().value); + } assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); searchResponse = client().prepareSearch("test") @@ -405,12 +435,11 @@ public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Except assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") - public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize() throws Exception { + public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize0() throws Exception { doTestSimpleTerminateAfterTrackTotalHitsUpTo(0); } - public void testSimpleTerminateAfterTrackTotalHitsUpToSize0() throws Exception { + public void testSimpleTerminateAfterTrackTotalHitsUpToSize() throws Exception { doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29)); } @@ -649,6 +678,23 @@ public void testTermQueryBigInt() throws Exception { assertEquals(1, searchResponse.getHits().getTotalHits().value); } + public void testIndexOnlyFloatField() throws IOException { + prepareCreate("idx").setMapping("field", "type=float,doc_values=false").get(); + ensureGreen("idx"); + + IndexRequestBuilder indexRequestBuilder = client().prepareIndex("idx"); + + for (float i = 9000.0F; i < 20000.0F; i++) { + indexRequestBuilder.setId(String.valueOf(i)).setSource("{\"field\":" + i + "}", MediaTypeRegistry.JSON).get(); + } + String queryJson = "{ \"filter\" : { \"terms\" : { \"field\" : [ 10000.0 ] } } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + ConstantScoreQueryBuilder query = ConstantScoreQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + 
assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + public void testTooLongRegexInRegexpQuery() throws Exception { createIndex("idx"); indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index 27a56f9d14f08..ea73f9ee1a2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.Scroll; @@ -53,7 +52,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -70,9 +69,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class SearchSliceIT extends ParameterizedOpenSearchIntegTestCase { - public SearchSliceIT(Settings dynamicSettings) { - super(dynamicSettings); +public class SearchSliceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchSliceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -83,11 +82,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { String mapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index 81e948640ee94..e40928f15e8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -47,7 +47,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -63,7 +62,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -109,7 +108,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; -public class FieldSortIT extends ParameterizedOpenSearchIntegTestCase { +public class 
FieldSortIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { public FieldSortIT(Settings dynamicSettings) { super(dynamicSettings); } @@ -122,11 +121,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { @@ -203,7 +197,8 @@ public void testIssue8226() throws InterruptedException { public void testIssue6614() throws ExecutionException, InterruptedException { List<IndexRequestBuilder> builders = new ArrayList<>(); boolean strictTimeBasedIndices = randomBoolean(); - final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month + // consider only 15 days of the month to avoid hitting open file limit + final int numIndices = randomIntBetween(2, 15); int docs = 0; for (int i = 0; i < numIndices; i++) { final String indexId = strictTimeBasedIndices ? "idx_" + i : "idx"; @@ -2389,4 +2384,185 @@ public void testLongSortOptimizationCorrectResults() throws InterruptedException } } + public void testSimpleSortsPoints() throws Exception { + final int docs = 100; + + Random random = random(); + assertAcked( + prepareCreate("test").setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("str_value") + .field("type", "keyword") + .endObject() + .startObject("boolean_value") + .field("type", "boolean") + .endObject() + .startObject("byte_value") + .field("type", "byte") + .endObject() + .startObject("short_value") + .field("type", "short") + .endObject() + .startObject("integer_value") + .field("type", "integer") + .endObject() + .startObject("long_value") + .field("type", "long") + .endObject() + .startObject("unsigned_long_value") + .field("type", "unsigned_long") + .endObject() + .startObject("float_value") + .field("type", "float") + .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() + .startObject("double_value") + .field("type", "double") + .endObject() + .endObject() + .endObject() + ) + ); + ensureGreen(); + BigInteger UNSIGNED_LONG_BASE = Numbers.MAX_UNSIGNED_LONG_VALUE.subtract(BigInteger.valueOf(10000 * docs)); + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (int i = 0; i < docs / 2; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) + .field("boolean_value", true) + .field("byte_value", i) + .field("short_value", i) + .field("integer_value", i) + .field("long_value", i) + .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) + .field("float_value", 32 * i) + .field("half_float_value", 16 * i) + .field("double_value", 64 * i) + .endObject() + ); + builders.add(builder); + } + + // We keep half of the docs with numeric values and other half without + for (int i = docs / 2; i < docs; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject().field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })).endObject() + ); + builders.add(builder); + } + + int j = 0; + Collections.shuffle(builders, random); + for 
(IndexRequestBuilder builder : builders) { + builder.get(); + if ((++j % 25) == 0) { + refresh(); + } + + } + refresh(); + indexRandomForConcurrentSearch("test"); + + final int size = 2; + // HALF_FLOAT + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("half_float_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // FLOAT + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // DOUBLE + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // UNSIGNED_LONG + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("unsigned_long_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + 
.addSort("unsigned_long_value", SortOrder.DESC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 766ac6139b24b..492ffce3321e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -39,13 +39,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -66,10 +65,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public GeoDistanceIT(Settings dynamicSettings) { - super(dynamicSettings); + public GeoDistanceIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -80,11 +79,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 1b8bd9694483d..b6f53936d5939 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -41,11 +41,10 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -65,7 +64,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSortValues; import static org.hamcrest.Matchers.closeTo; -public class GeoDistanceSortBuilderIT extends ParameterizedOpenSearchIntegTestCase { +public class 
GeoDistanceSortBuilderIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public GeoDistanceSortBuilderIT(Settings settings) { super(settings); } @@ -78,11 +77,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private static final String LOCATION_FIELD = "location"; @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java index ddfbc3cce2be6..cb8b508c4496b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java @@ -40,7 +40,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -49,7 +48,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -75,12 +74,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimpleSortIT extends ParameterizedOpenSearchIntegTestCase { +public class SimpleSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String DOUBLE_APOSTROPHE = "\u0027\u0027"; - public SimpleSortIT(Settings dynamicSettings) { - super(dynamicSettings); + public SimpleSortIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -91,11 +90,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 76e68781c72ba..ec891045cb510 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -12,14 +12,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.plugin.CustomSortBuilder; import org.opensearch.search.sort.plugin.CustomSortPlugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -27,7 +26,7 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; -public class SortFromPluginIT extends ParameterizedOpenSearchIntegTestCase { +public class SortFromPluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SortFromPluginIT(Settings settings) { super(settings); } @@ -40,11 +39,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomSortPlugin.class, InternalSettingsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index a9c4bf841d9a1..4c1e47ef8da99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -38,14 +38,13 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHits; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -57,10 +56,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MetadataFetchingIT extends ParameterizedOpenSearchIntegTestCase { +public class MetadataFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public MetadataFetchingIT(Settings dynamicSettings) { - super(dynamicSettings); + public MetadataFetchingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -71,11 +70,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index 805e82dc9850b..294657cedcc5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -36,8 +36,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -47,10 +46,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; -public class SourceFetchingIT extends ParameterizedOpenSearchIntegTestCase { +public class SourceFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SourceFetchingIT(Settings dynamicSettings) { - super(dynamicSettings); + public SourceFetchingIT(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -61,11 +60,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return 
Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java index f770bd9864850..f8d2955440bc4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java @@ -16,7 +16,6 @@ import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesQueryCache; @@ -61,6 +60,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, SEGMENT_SLICE_COUNT) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) .build(); } @@ -74,11 +74,6 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testConcurrentQueryCount() throws Exception { String INDEX_1 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index 8fb3c57dd7680..99cb3a4e8ca20 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -45,7 +45,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.plugins.Plugin; @@ -54,7 +53,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -81,10 +80,10 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) -public class SearchStatsIT extends ParameterizedOpenSearchIntegTestCase { +public class SearchStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public SearchStatsIT(Settings dynamicSettings) { - super(dynamicSettings); + public SearchStatsIT(Settings staticSettings) { + super(staticSettings); } 
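The ConcurrentSearchStatsIT hunk above shows the replacement mechanism for the deleted featureFlagSettings() overrides: concurrent segment search is now switched on through an ordinary cluster setting rather than a feature flag. A minimal sketch of that node-level override, using the same names as the hunk (SearchService and Settings imported as in the file):

    // Enable concurrent segment search per node via the cluster setting
    // instead of the removed CONCURRENT_SEGMENT_SEARCH feature flag.
    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder()
            .put(super.nodeSettings(nodeOrdinal))
            .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true)
            .build();
    }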
@ParametersFactory @@ -95,11 +94,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index b342e6d35f0b4..c72e128a88045 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -48,7 +48,6 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; @@ -65,7 +64,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -100,7 +99,7 @@ import static org.hamcrest.Matchers.notNullValue; @SuppressCodecs("*") // requires custom completion format -public class CompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class CompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public CompletionSuggestSearchIT(Settings settings) { super(settings); } @@ -113,11 +112,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java index bac3e7fb61683..67523e9fd424a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -53,7 +52,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import 
org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.search.suggest.completion.context.GeoQueryContext; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -73,7 +72,7 @@ import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format -public class ContextCompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class ContextCompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ContextCompletionSuggestSearchIT(Settings settings) { super(settings); } @@ -86,11 +85,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index bc6e49008bf6b..e0afdbc816f5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -41,7 +41,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -57,7 +56,7 @@ import org.opensearch.search.suggest.phrase.StupidBackoff; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -96,7 +95,7 @@ * possible these tests should declare for the first request, make the request, modify the configuration for the next request, make that * request, modify again, request again, etc. This makes it very obvious what changes between requests. 
*/ -public class SuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { +public class SuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SuggestSearchIT(Settings settings) { super(settings); } @@ -109,11 +108,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { assertAcked(prepareCreate("test").setMapping("text", "type=text")); diff --git a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java index 8c9bff9833462..b89541c647580 100644 --- a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java @@ -36,8 +36,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,7 +48,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimilarityIT extends ParameterizedOpenSearchIntegTestCase { +public class SimilarityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public SimilarityIT(Settings settings) { super(settings); } @@ -62,11 +61,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public void testCustomBM25Similarity() throws Exception { try { client().admin().indices().prepareDelete("test").execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e79bf1c16b586..78827849a8037 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -92,6 +92,7 @@ public void testDeleteShallowCopySnapshot() throws Exception { } // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. 
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9208") public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); @@ -303,7 +304,8 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); logger.info("--> create two remote index shallow snapshots"); - List<String> shallowCopySnapshots = createNSnapshots(snapshotRepoName, 2); + SnapshotInfo snapshotInfo1 = createFullSnapshot(snapshotRepoName, "snap1"); + SnapshotInfo snapshotInfo2 = createFullSnapshot(snapshotRepoName, "snap2"); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); @@ -314,17 +316,18 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { logger.info("--> delete snapshot 1"); AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() .cluster() - .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(0)) + .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo1.snapshotId().getName()) .get(); assertAcked(deleteSnapshotResponse); lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); + assertTrue(lockFiles[0].contains(snapshotInfo2.snapshotId().getUUID())); logger.info("--> delete snapshot 2"); deleteSnapshotResponse = clusterManagerClient.admin() .cluster() - .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(1)) + .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo2.snapshotId().getName()) .get(); assertAcked(deleteSnapshotResponse); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 9a92ddc81852a..90bb2b501764e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -7,6 +7,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -25,15 +26,21 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; 
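For the `DeleteSnapshotIT` change just above: swapping the anonymous `createNSnapshots` helper for two named `createFullSnapshot` calls is what lets the test pin the surviving lock file to a concrete snapshot UUID. A condensed restatement of that assertion flow, using the test's own helper names, purely as illustration:

```java
// Condensed from the testRemoteStoreCleanupForDeletedIndex hunk above: named snapshots
// expose their SnapshotInfo, so after deleting snap1 the one remaining lock file can be
// asserted to belong to snap2 specifically.
SnapshotInfo snapshotInfo1 = createFullSnapshot(snapshotRepoName, "snap1");
SnapshotInfo snapshotInfo2 = createFullSnapshot(snapshotRepoName, "snap2");

// ... delete snap1 via prepareDeleteSnapshot(snapshotRepoName, snapshotInfo1.snapshotId().getName()) ...

String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID);
assert lockFiles.length == 1 : "lock files are " + Arrays.toString(lockFiles);
assert lockFiles[0].contains(snapshotInfo2.snapshotId().getUUID()); // only snap2's lock survives
```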
import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.monitor.fs.FsInfo; @@ -47,6 +54,8 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -54,6 +63,7 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -75,10 +85,10 @@ protected Settings.Builder randomRepositorySettings() { return settings; } - private Settings.Builder chunkedRepositorySettings() { + private Settings.Builder chunkedRepositorySettings(long chunkSize) { final Settings.Builder settings = Settings.builder(); settings.put("location", randomRepoPath()).put("compress", randomBoolean()); - settings.put("chunk_size", 2 << 23, ByteSizeUnit.BYTES); + settings.put("chunk_size", chunkSize, ByteSizeUnit.BYTES); return settings; } @@ -184,10 +194,10 @@ public void testSnapshottingSearchableSnapshots() throws Exception { } /** - * Tests a chunked repository scenario for searchable snapshots by creating an index, + * Tests a default 8 MiB chunked repository scenario for searchable snapshots by creating an index, * taking a snapshot, restoring it as a searchable snapshot index. */ - public void testCreateSearchableSnapshotWithChunks() throws Exception { + public void testCreateSearchableSnapshotWithDefaultChunks() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; @@ -195,7 +205,33 @@ public void testCreateSearchableSnapshotWithChunks() throws Exception { final String snapshotName = "test-snap"; final Client client = client(); - Settings.Builder repositorySettings = chunkedRepositorySettings(); + Settings.Builder repositorySettings = chunkedRepositorySettings(2 << 23); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); + createRepositoryWithSettings(repositorySettings, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + deleteIndicesAndEnsureGreen(client, indexName); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + assertDocCount(restoredIndexName, 1000L); + } + + /** + * Tests a small 1000-byte chunked repository scenario for searchable snapshots by creating an index, + * taking a snapshot, restoring it as a searchable snapshot index.
+ */ + public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { + final int numReplicasIndex = randomIntBetween(1, 4); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + Settings.Builder repositorySettings = chunkedRepositorySettings(1000); internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); @@ -234,6 +270,62 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() assertDocCount(indexName, 100L); } + public void testSearchableSnapshotAllocationFilterSettings() throws Exception { + final int numShardsIndex = randomIntBetween(3, 6); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numShardsIndex); + createIndexWithDocsAndEnsureGreen(numShardsIndex, 1, 100, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + final Set<String> searchNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) + .filter(DiscoveryNode::isSearchNode) + .map(DiscoveryNode::getId) + .collect(Collectors.toSet()); + + for (int i = searchNodes.size(); i > 2; --i) { + String pickedNode = randomFrom(searchNodes); + searchNodes.remove(pickedNode); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, true); + assertTrue( + client.admin() + .indices() + .prepareUpdateSettings(restoredIndexName) + .setSettings(Settings.builder().put("index.routing.allocation.exclude._id", pickedNode)) + .execute() + .actionGet() + .isAcknowledged() + ); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, false); + assertIndexAssignedToNodeOrNot(indexName, pickedNode, true); + } + } + + private void assertIndexAssignedToNodeOrNot(String index, String node, boolean assigned) { + final ClusterState state = clusterService().state(); + if (assigned) { + assertTrue(state.getRoutingTable().allShards(index).stream().anyMatch(shard -> shard.currentNodeId().equals(node))); + } else { + assertTrue(state.getRoutingTable().allShards(index).stream().noneMatch(shard -> shard.currentNodeId().equals(node))); + } + } + /** * Tests the functionality of remote shard allocation to * ensure it can handle node drops for failover scenarios and the cluster gets back to a healthy state when @@ -341,11 +433,16 @@ public void testDeleteSearchableSnapshotBackingIndex() throws Exception { } private void createIndexWithDocsAndEnsureGreen(int numReplicasIndex, int numOfDocs, String indexName) throws InterruptedException { + createIndexWithDocsAndEnsureGreen(1, numReplicasIndex, numOfDocs, indexName); + } + + private void createIndexWithDocsAndEnsureGreen(int numShardsIndex, int numReplicasIndex, int 
numOfDocs, String indexName) + throws InterruptedException { createIndex( indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex)) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasIndex) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsIndex) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) .build() ); @@ -722,6 +819,47 @@ public void testDefaultShardPreference() throws Exception { } } + public void testRestoreSearchableSnapshotWithIndexStoreTypeThrowsException() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final int numReplicasIndex1 = randomIntBetween(1, 4); + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(numReplicasIndex1 + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1); + deleteIndicesAndEnsureGreen(client, indexName1); + + internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex1 + 1); + + // set "index.store.type" to "remote_snapshot" in index settings of restore API and assert appropriate exception with error message + // is thrown. + final SnapshotRestoreException error = expectThrows( + SnapshotRestoreException.class, + () -> client.admin() + .cluster() + .prepareRestoreSnapshot(repoName, snapshotName) + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setIndexSettings( + Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + ) + .setWaitForCompletion(true) + .execute() + .actionGet() + ); + assertThat( + error.getMessage(), + containsString( + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." + ) + ); + } + /** * Asserts the cache folder count to match the number of shards and the number of indices within the cache folder * as provided. 
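The new `testSearchableSnapshotAllocationFilterSettings` above depends on allocation filters being accepted for `remote_snapshot` indices at all, which is what the `TransportUpdateSettingsAction` allowlist change later in this diff (adding "index.routing.allocation") enables. A hedged sketch of that pattern in isolation, assuming the test's `client` plus an illustrative index name and `excludedNodeId`:

```java
// Exclude one node from a restored searchable snapshot index, then wait until no shards
// are relocating; this mirrors the loop in the test above.
client.admin()
    .indices()
    .prepareUpdateSettings("test-idx-copy")
    .setSettings(Settings.builder().put("index.routing.allocation.exclude._id", excludedNodeId))
    .get();

ClusterHealthResponse health = client.admin()
    .cluster()
    .prepareHealth()
    .setWaitForEvents(Priority.LANGUID)
    .setWaitForNoRelocatingShards(true)
    .setTimeout(TimeValue.timeValueMinutes(5))
    .get();
assert health.isTimedOut() == false; // the excluded node no longer hosts shards of the index
```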
@@ -750,4 +888,75 @@ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, i // Verifies if all the shards (primary and replica) have been deleted assertEquals(numCacheFolderCount, searchNodeFileCachePaths.size()); } + + public void testRelocateSearchableSnapshotIndex() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName = "test-idx-1"; + final String restoredIndexName = indexName + "-copy"; + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(1); + createIndexWithDocsAndEnsureGreen(0, 100, indexName); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + deleteIndicesAndEnsureGreen(client, indexName); + + String searchNode1 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + String searchNode2 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + + final Index index = resolveIndex(restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, true); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + + // relocate the shard from node1 to node2 + client.admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(restoredIndexName, 0, searchNode1, searchNode2)) + .execute() + .actionGet(); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertDocCount(restoredIndexName, 100L); + + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, false); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, true); + deleteIndicesAndEnsureGreen(client, restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + } + + private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception { + final Node node = internalCluster().getInstance(Node.class, nodeName); + final ShardId shardId = new ShardId(index, 0); + final ShardPath shardPath = ShardPath.loadFileCachePath(node.getNodeEnvironment(), shardId); + + assertBusy(() -> { + assertTrue( + "shard state path should " + (exists ? "exist" : "not exist"), + Files.exists(shardPath.getShardStatePath()) == exists + ); + assertTrue("shard cache path should " + (exists ? "exist" : "not exist"), Files.exists(shardPath.getDataPath()) == exists); + }, 30, TimeUnit.SECONDS); + + final Path indexDataPath = node.getNodeEnvironment().fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + final Path indexPath = node.getNodeEnvironment().fileCacheNodePath().indicesPath.resolve(index.getUUID()); + assertBusy(() -> { + assertTrue("index path should " + (exists ? "exist" : "not exist"), Files.exists(indexDataPath) == exists); + assertTrue("index cache path should " + (exists ? 
"exist" : "not exist"), Files.exists(indexPath) == exists); + }, 30, TimeUnit.SECONDS); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java index f50fc691fb232..28b84655a2cc7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -12,7 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -53,7 +53,7 @@ public void testRestrictedSettingsCantBeUpdated() { assertEquals( e.getMessage(), "[system-repo-name] trying to modify an unmodifiable attribute type of system " - + "repository from current value [fs] to new value [mock]" + + "repository from current value [reloadable-fs] to new value [mock]" ); } @@ -65,7 +65,12 @@ public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); assertAcked( - client.admin().cluster().preparePutRepository(systemRepoName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + client.admin() + .cluster() + .preparePutRepository(systemRepoName) + .setType(ReloadableFsRepository.TYPE) + .setSettings(repoSettings) + .get() ); } } diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index 39c22e60f038a..c5a5ce12b238c 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -8,13 +8,11 @@ package org.opensearch; -import org.opensearch.core.index.snapshots.IndexShardSnapshotException; -import org.opensearch.crypto.CryptoRegistryException; - import static org.opensearch.OpenSearchException.OpenSearchExceptionHandle; import static org.opensearch.OpenSearchException.OpenSearchExceptionHandleRegistry.registerExceptionHandle; import static org.opensearch.OpenSearchException.UNKNOWN_VERSION_ADDED; import static org.opensearch.Version.V_2_10_0; +import static org.opensearch.Version.V_2_13_0; import static org.opensearch.Version.V_2_1_0; import static org.opensearch.Version.V_2_4_0; import static org.opensearch.Version.V_2_5_0; @@ -678,7 +676,12 @@ public static void registerExceptions() { ) ); registerExceptionHandle( - new OpenSearchExceptionHandle(IndexShardSnapshotException.class, IndexShardSnapshotException::new, 98, UNKNOWN_VERSION_ADDED) + new OpenSearchExceptionHandle( + org.opensearch.core.index.snapshots.IndexShardSnapshotException.class, + org.opensearch.core.index.snapshots.IndexShardSnapshotException::new, + 98, + UNKNOWN_VERSION_ADDED + ) ); registerExceptionHandle( new OpenSearchExceptionHandle( @@ -1174,7 +1177,30 @@ public static void registerExceptions() { V_2_7_0 ) ); - registerExceptionHandle(new OpenSearchExceptionHandle(CryptoRegistryException.class, CryptoRegistryException::new, 171, V_2_10_0)); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.crypto.CryptoRegistryException.class, + 
org.opensearch.crypto.CryptoRegistryException::new, + 171, + V_2_10_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewNotFoundException.class, + org.opensearch.action.admin.indices.view.ViewNotFoundException::new, + 172, + V_2_13_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewAlreadyExistsException.class, + org.opensearch.action.admin.indices.view.ViewAlreadyExistsException::new, + 173, + V_2_13_0 + ) + ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.cluster.block.IndexCreateBlockException.class, diff --git a/server/src/main/java/org/opensearch/SpecialPermission.java b/server/src/main/java/org/opensearch/SpecialPermission.java index 8a694d4543f32..8348f0844acc6 100644 --- a/server/src/main/java/org/opensearch/SpecialPermission.java +++ b/server/src/main/java/org/opensearch/SpecialPermission.java @@ -98,6 +98,7 @@ public SpecialPermission(String name, String actions) { /** * Check that the current stack has {@link SpecialPermission} access according to the {@link SecurityManager}. */ + @SuppressWarnings("removal") public static void check() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 46775466aa615..b19bf9590f43b 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -224,6 +224,12 @@ import org.opensearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.opensearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.TransportBulkAction; import org.opensearch.action.bulk.TransportShardBulkAction; @@ -409,6 +415,7 @@ import org.opensearch.rest.action.admin.indices.RestUpgradeAction; import org.opensearch.rest.action.admin.indices.RestUpgradeStatusAction; import org.opensearch.rest.action.admin.indices.RestValidateQueryAction; +import org.opensearch.rest.action.admin.indices.RestViewAction; import org.opensearch.rest.action.cat.AbstractCatAction; import org.opensearch.rest.action.cat.RestAliasAction; import org.opensearch.rest.action.cat.RestAllocationAction; @@ -721,6 +728,14 @@ public <Request extends ActionRequest, Response extends ActionResponse> void reg actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); actions.register(DataStreamsStatsAction.INSTANCE, DataStreamsStatsAction.TransportAction.class); + // Views: + actions.register(CreateViewAction.INSTANCE, CreateViewAction.TransportAction.class); + actions.register(DeleteViewAction.INSTANCE, DeleteViewAction.TransportAction.class); + actions.register(GetViewAction.INSTANCE, GetViewAction.TransportAction.class); + actions.register(UpdateViewAction.INSTANCE, UpdateViewAction.TransportAction.class); + 
actions.register(ListViewNamesAction.INSTANCE, ListViewNamesAction.TransportAction.class); + actions.register(SearchViewAction.INSTANCE, SearchViewAction.TransportAction.class); + // Persistent tasks: actions.register(StartPersistentTaskAction.INSTANCE, StartPersistentTaskAction.TransportAction.class); actions.register(UpdatePersistentTaskStatusAction.INSTANCE, UpdatePersistentTaskStatusAction.TransportAction.class); @@ -915,6 +930,14 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestResolveIndexAction()); registerHandler.accept(new RestDataStreamsStatsAction()); + // View API + registerHandler.accept(new RestViewAction.CreateViewHandler()); + registerHandler.accept(new RestViewAction.DeleteViewHandler()); + registerHandler.accept(new RestViewAction.GetViewHandler()); + registerHandler.accept(new RestViewAction.UpdateViewHandler()); + registerHandler.accept(new RestViewAction.SearchViewHandler()); + registerHandler.accept(new RestViewAction.ListViewNamesHandler()); + // CAT API registerHandler.accept(new RestAllocationAction()); registerHandler.accept(new RestCatSegmentReplicationAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index e694a5e102e02..17b633c533218 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -53,7 +53,7 @@ @PublicApi(since = "1.0.0") public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> { - private Set<String> requestedMetrics = Metric.allMetrics(); + private Set<String> requestedMetrics = Metric.defaultMetrics(); /** * Create a new NodeInfoRequest from a {@link StreamInput} object. @@ -73,7 +73,7 @@ public NodesInfoRequest(StreamInput in) throws IOException { */ public NodesInfoRequest(String... nodesIds) { super(nodesIds); - all(); + defaultMetrics(); } /** @@ -85,13 +85,24 @@ public NodesInfoRequest clear() { } /** - * Sets to return all the data. + * Sets to return data for all the metrics. + * See {@link Metric} */ public NodesInfoRequest all() { requestedMetrics.addAll(Metric.allMetrics()); return this; } + /** + * Sets to return data for default metrics only. + * See {@link Metric} + * See {@link Metric#defaultMetrics()}. + */ + public NodesInfoRequest defaultMetrics() { + requestedMetrics.addAll(Metric.defaultMetrics()); + return this; + } + /** * Get the names of requested metrics */ @@ -156,7 +167,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * An enumeration of the "core" sections of metrics that may be requested - * from the nodes information endpoint. Eventually this list list will be + * from the nodes information endpoint. Eventually this list will be * pluggable. */ public enum Metric { @@ -187,8 +198,25 @@ boolean containedIn(Set<String> metricNames) { return metricNames.contains(this.metricName()); } + /** + * Return all available metrics. + * See {@link Metric} + */ public static Set<String> allMetrics() { return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); } + + /** + * Return "the default" set of metrics. + * Similar to {@link #allMetrics()} except {@link Metric#SEARCH_PIPELINES} metric is not included. + * <br> + * The motivation to define the default set of metrics was to keep the default response + * size at bay. 
Metrics that are NOT included in the default set were typically introduced later + * and are considered to contain a specific type of information that is not usually useful unless you + * know that you really need it. + */ + public static Set<String> defaultMetrics() { + return allMetrics().stream().filter(metric -> !(metric.equals(SEARCH_PIPELINES.metricName()))).collect(Collectors.toSet()); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 8293a5bb27612..8562a7eb37709 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -229,8 +229,7 @@ public NodeStats(StreamInput in) throws IOException { } else { repositoriesStats = null; } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { admissionControlStats = in.readOptionalWriteable(AdmissionControlStats::new); } else { admissionControlStats = null; @@ -504,8 +503,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } - // TODO: change to V_2_12_0 on main after backport to 2.x - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(admissionControlStats); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index e62c83490d810..ab6451382aa88 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -55,6 +55,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResult; import org.opensearch.tasks.TaskResultsService; import org.opensearch.threadpool.ThreadPool; @@ -84,6 +85,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques private final Client client; private final NamedXContentRegistry xContentRegistry; + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject public TransportGetTaskAction( ThreadPool threadPool, @@ -91,7 +94,8 @@ public TransportGetTaskAction( ActionFilters actionFilters, ClusterService clusterService, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + TaskResourceTrackingService taskResourceTrackingService ) { super(GetTaskAction.NAME, transportService, actionFilters, GetTaskRequest::new); this.threadPool = threadPool; @@ -99,6 +103,7 @@ public TransportGetTaskAction( this.transportService = transportService; this.client = new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN); this.xContentRegistry = xContentRegistry; + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -173,6 +178,7 @@ public void onFailure(Exception e) { } }); } else { + taskResourceTrackingService.refreshResourceStats(runningTask); TaskInfo info =
runningTask.taskInfo(clusterService.localNode().getId(), true); listener.onResponse(new GetTaskResponse(new TaskResult(false, info))); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index e5dbefc3dba97..01b4cd779c261 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -594,4 +594,25 @@ public void writeTo(StreamOutput out) throws IOException { } waitForActiveShards.writeTo(out); } + + @Override + public String toString() { + return "CreateIndexRequest{" + + "cause='" + + cause + + '\'' + + ", index='" + + index + + '\'' + + ", settings=" + + settings + + ", mappings='" + + mappings + + '\'' + + ", aliases=" + + aliases + + ", waitForActiveShards=" + + waitForActiveShards + + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index f38b49f434261..bf6ee9ca43755 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -69,11 +69,13 @@ public static final class Defaults { public static final int MAX_NUM_SEGMENTS = -1; public static final boolean ONLY_EXPUNGE_DELETES = false; public static final boolean FLUSH = true; + public static final boolean PRIMARY_ONLY = false; } private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; + private boolean primaryOnly = Defaults.PRIMARY_ONLY; private static final Version FORCE_MERGE_UUID_VERSION = Version.V_3_0_0; @@ -100,6 +102,9 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + primaryOnly = in.readBoolean(); + } if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { forceMergeUUID = in.readString(); } else if ((forceMergeUUID = in.readOptionalString()) == null) { @@ -166,6 +171,21 @@ public ForceMergeRequest flush(boolean flush) { return this; } + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public boolean primaryOnly() { + return primaryOnly; + } + + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequest primaryOnly(boolean primaryOnly) { + this.primaryOnly = primaryOnly; + return this; + } + /** * Should this task store its result after it has finished?
*/ @@ -188,6 +208,8 @@ public String getDescription() { + onlyExpungeDeletes + "], flush[" + flush + + "], primaryOnly[" + + primaryOnly + "]"; } @@ -197,6 +219,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(primaryOnly); + } if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { out.writeString(forceMergeUUID); } else { @@ -213,6 +238,8 @@ public String toString() { + onlyExpungeDeletes + ", flush=" + flush + + ", primaryOnly=" + + primaryOnly + '}'; } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index d8a618a1828ad..10b9749f16b27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -81,4 +81,12 @@ public ForceMergeRequestBuilder setFlush(boolean flush) { request.flush(flush); return this; } + + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequestBuilder setPrimaryOnly(boolean primaryOnly) { + request.primaryOnly(primaryOnly); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index fb8eb86c12269..b71c75462900a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -115,11 +115,16 @@ protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting sha } /** - * The refresh request works against *all* shards. + * The force merge request works against *all* shards by default, but it can be limited to primary shards only + * by setting primary_only to true.
*/ @Override protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) { - return clusterState.routingTable().allShards(concreteIndices); + if (request.primaryOnly()) { + return clusterState.routingTable().allShardsSatisfyingPredicate(concreteIndices, ShardRouting::primary); + } else { + return clusterState.routingTable().allShards(concreteIndices); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 14c985f1d3427..9265c6ae60678 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -82,7 +82,7 @@ public class TransportUpdateSettingsAction extends TransportClusterManagerNodeAc "index.number_of_replicas" ); - private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog" }; + private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog", "index.routing.allocation" }; private final MetadataUpdateSettingsService updateSettingsService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 04166c88a00ad..3fbf9ac1bb570 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -258,9 +258,9 @@ void finish() { storeStatuses.add( new IndicesShardStoresResponse.StoreStatus( response.getNode(), - response.allocationId(), + response.getGatewayShardStarted().allocationId(), allocationStatus, - response.storeException() + response.getGatewayShardStarted().storeException() ) ); } @@ -308,7 +308,8 @@ private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationSta * A shard exists/existed in a node only if shard state file exists in the node */ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { - return response.storeException() != null || response.allocationId() != null; + return response.getGatewayShardStarted().storeException() != null + || response.getGatewayShardStarted().allocationId() != null; } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 23cd8efdcaf59..ca4c16935c2b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -168,9 +168,11 @@ protected void clusterManagerOperation( .getSegments() .getReplicationStats().maxBytesBehind != 0) { throw new IllegalStateException( - " For index [" + "Replication still in progress for index [" + sourceIndex - + "] replica shards haven't caught up with primary, please retry after sometime." + + "]. Please wait for replication to complete and retry. Use the _cat/segment_replication/" + + sourceIndex + + " api to check if the index is up to date (e.g. bytes_behind == 0)." 
); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java new file mode 100644 index 0000000000000..9faf25ce10732 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java @@ -0,0 +1,279 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** Action to create a view */ +@ExperimentalApi +public class CreateViewAction extends ActionType<GetViewAction.Response> { + + private static final int MAX_NAME_LENGTH = 64; + private static final int MAX_DESCRIPTION_LENGTH = 256; + private static final int MAX_TARGET_COUNT = 25; + private static final int MAX_TARGET_INDEX_PATTERN_LENGTH = 64; + + public static final CreateViewAction INSTANCE = new CreateViewAction(); + public static final String NAME = "cluster:admin/views/create"; + + private CreateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** + * Request for Creating View + */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + private final String description; + private final List<Target> targets; + + public Request(final String name, final String description, final List<Target> targets) { + this.name = name; + this.description = Objects.requireNonNullElse(description, ""); + this.targets = targets; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.description = in.readString(); + this.targets = in.readList(Target::new); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public List<Target> getTargets() { + return new 
ArrayList<>(targets); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return name.equals(that.name) && description.equals(that.description) && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, targets); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + if (name != null && name.length() > MAX_NAME_LENGTH) { + validationException = ValidateActions.addValidationError( + "name must be less than " + MAX_NAME_LENGTH + " characters in length", + validationException + ); + } + if (description != null && description.length() > MAX_DESCRIPTION_LENGTH) { + validationException = ValidateActions.addValidationError( + "description must be less than " + MAX_DESCRIPTION_LENGTH + " characters in length", + validationException + ); + } + if (CollectionUtils.isEmpty(targets)) { + validationException = ValidateActions.addValidationError("targets cannot be empty", validationException); + } else { + if (targets.size() > MAX_TARGET_COUNT) { + validationException = ValidateActions.addValidationError( + "view cannot have more than " + MAX_TARGET_COUNT + " targets", + validationException + ); + } + for (final Target target : targets) { + final var validationMessages = Optional.ofNullable(target.validate()) + .map(ValidationException::validationErrors) + .orElse(List.of()); + for (final String validationMessage : validationMessages) { + validationException = ValidateActions.addValidationError(validationMessage, validationException); + } + } + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeString(description); + out.writeList(targets); + } + + /** View target representation for create requests */ + @ExperimentalApi + public static class Target implements Writeable { + public final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = indexPattern; + } + + public Target(final StreamInput in) throws IOException { + this.indexPattern = in.readString(); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(indexPattern)) { + validationException = ValidateActions.addValidationError("index pattern cannot be empty or null", validationException); + } + if (indexPattern != null && indexPattern.length() > MAX_TARGET_INDEX_PATTERN_LENGTH) { + validationException = ValidateActions.addValidationError( + "target index pattern must be less than " + MAX_TARGET_INDEX_PATTERN_LENGTH + " characters in length", + validationException + ); + } + + return validationException; + } 
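Taken together, `Request.validate()` and `Target.validate()` above give create-view requests a full set of guard rails. A short illustrative use, with made-up values, of how a caller sees them:

```java
// Illustrative only: building a create-view request with the classes defined above and
// checking it against the documented limits (name <= 64 chars, description <= 256 chars,
// at most 25 targets, each index pattern <= 64 chars, nothing empty or null).
CreateViewAction.Request request = new CreateViewAction.Request(
    "logs-view",
    "All application log indices",
    List.of(new CreateViewAction.Request.Target("logs-*"))
);
ActionRequestValidationException validationError = request.validate();
// null means every constraint passed; otherwise the exception aggregates messages such
// as "name cannot be empty or null" or "view cannot have more than 25 targets".
assert validationError == null;
```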
+ + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.Target.INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "create_view_request", + args -> new Request((String) args[0], (String) args[1], (List<Target>) args[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), View.DESCRIPTION_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), View.TARGETS_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for creating a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.createView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java new file mode 100644 index 0000000000000..abb3c3f4db5f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to delete a view */ +@SuppressWarnings("deprecation") +@ExperimentalApi +public class DeleteViewAction extends ActionType<AcknowledgedResponse> { + + public static final DeleteViewAction INSTANCE = new DeleteViewAction(); + public static final String NAME = "cluster:admin/views/delete"; + + public DeleteViewAction() { + super(NAME, AcknowledgedResponse::new); + } + + /** Request for delete view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "delete_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for deleting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, AcknowledgedResponse> { + + private final 
ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected AcknowledgedResponse read(final StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<AcknowledgedResponse> listener + ) throws Exception { + viewService.deleteView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java new file mode 100644 index 0000000000000..762eea965c8c1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java @@ -0,0 +1,214 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to get a view */ +@ExperimentalApi +public class GetViewAction extends ActionType<GetViewAction.Response> { + + public static final GetViewAction INSTANCE = new GetViewAction(); + public static final String NAME = "views:data/read/get"; + 
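+ // Note: the read-only view actions use the "views:data/read/*" action namespace (see also + // ListViewNamesAction and SearchViewAction), while the mutating actions such as DeleteViewAction + // and UpdateViewAction live under "cluster:admin/views/*".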
+ public GetViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for get view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "get_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** Response with a view */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final View view; + + public Response(final View view) { + this.view = view; + } + + public Response(final StreamInput in) throws IOException { + super(in); + this.view = new View(in); + } + + public View getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return getView().equals(that.getView()); + } + + @Override + public int hashCode() { + return Objects.hash(getView()); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + this.view.writeTo(out); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("view", view); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Response, Void> PARSER = new ConstructingObjectParser<>( + "view_response", + args -> new Response((View) args[0]) + ); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), View.PARSER, new ParseField("view")); + } + + public static Response fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for getting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService 
viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected Response read(final StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void clusterManagerOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) + throws Exception { + viewService.getView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java new file mode 100644 index 0000000000000..eac0b1d5558ca --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** Action to list view names */ +@ExperimentalApi +public class ListViewNamesAction extends ActionType<ListViewNamesAction.Response> { + + public static final ListViewNamesAction INSTANCE = new ListViewNamesAction(); + public static final String NAME = "views:data/read/list"; + + public ListViewNamesAction() { + super(NAME, ListViewNamesAction.Response::new); + } + + /** Request for list view names */ + @ExperimentalApi + public static class Request extends ActionRequest { + public Request() {} + + public Request(final StreamInput in) {} + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return true; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + /** Response for list view names */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final List<String> views; + + public Response(final List<String> views) { + this.views = views; + } + + public Response(final StreamInput in) throws IOException { + views = in.readStringList(); + } + + public
List<String> getViewNames() { + return views; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return views.equals(that.views); + } + + @Override + public int hashCode() { + return Objects.hash(views); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeStringCollection(views); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("views", views); + builder.endObject(); + return builder; + } + } + + /** + * Transport Action for listing View names + */ + public static class TransportAction extends HandledTransportAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener<Response> listener) { + viewService.listViewNames(listener); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java new file mode 100644 index 0000000000000..1e20221242f06 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Function; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** Action to search a view */ +@ExperimentalApi +public class SearchViewAction extends ActionType<SearchResponse> { + + public static final SearchViewAction INSTANCE = new SearchViewAction(); + public static final String NAME = "views:data/read/search"; + + private SearchViewAction() { + super(NAME, SearchResponse::new); + } + + /** + * Wraps the functionality of search requests and tailors it to what is available + * when searching through views + */ + @ExperimentalApi + public static class Request extends SearchRequest { + + private final String view; + + public Request(final String view, final SearchRequest searchRequest) { + super(searchRequest); + this.view = view; + } + + public Request(final StreamInput in) throws IOException { + super(in); + view = in.readString(); + } + + public String getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return view.equals(that.view) && super.equals(that); + } + + @Override + public int hashCode() { + return Objects.hash(view, super.hashCode()); + } + + @Override + public ActionRequestValidationException validate() { + final Function<String, String> unsupported = (String x) -> x + " is not supported when searching views"; + ActionRequestValidationException validationException = super.validate(); + + if (scroll() != null) { + validationException = addValidationError(unsupported.apply("Scroll"), validationException); + } + + // TODO: Filter out any additional search features that are not supported. + // Required before removing @ExperimentalApi annotations.
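+ // Stateful features that hold coordinator-side context across requests (point-in-time readers, + // for example) are likely candidates, but the exact set to reject here is still an open question.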
+ + if (Strings.isNullOrEmpty(view)) { + validationException = addValidationError("View is required", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(view); + } + + @Override + public String toString() { + return super.toString().replace("SearchRequest{", "SearchViewAction.Request{view=" + view + ","); + } + } + + /** + * Transport Action for searching a View + */ + public static class TransportAction extends HandledTransportAction<Request, SearchResponse> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(final Task task, final Request request, final ActionListener<SearchResponse> listener) { + viewService.searchView(request, listener); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java new file mode 100644 index 0000000000000..9182684c73a0b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** Action to update a view */ +@ExperimentalApi +public class UpdateViewAction extends ActionType<GetViewAction.Response> { + + public static final UpdateViewAction INSTANCE = new UpdateViewAction(); + public static final String NAME = "cluster:admin/views/update"; + + public UpdateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for update view */ + @ExperimentalApi + public static class Request { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<CreateViewAction.Request, String> PARSER = new ConstructingObjectParser<>( + "create_view_request", + false, + (args, viewName) -> new CreateViewAction.Request(viewName, (String) args[0], (List<CreateViewAction.Request.Target>) args[1]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), 
View.DESCRIPTION_FIELD); + PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> CreateViewAction.Request.Target.fromXContent(p), + View.TARGETS_FIELD + ); + } + + public static CreateViewAction.Request fromXContent(final XContentParser parser, final String viewName) throws IOException { + return PARSER.parse(parser, viewName); + } + } + + /** + * Transport Action for updating a View + */ + @ExperimentalApi + public static class TransportAction extends TransportClusterManagerNodeAction<CreateViewAction.Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super( + NAME, + transportService, + clusterService, + threadPool, + actionFilters, + CreateViewAction.Request::new, + indexNameExpressionResolver + ); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final CreateViewAction.Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.updateView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final CreateViewAction.Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java new file mode 100644 index 0000000000000..90a69158286b4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceAlreadyExistsException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view already exists */ +@ExperimentalApi +public class ViewAlreadyExistsException extends ResourceAlreadyExistsException { + + public ViewAlreadyExistsException(final String viewName) { + super("View [{}] already exists", viewName); + } + + public ViewAlreadyExistsException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java new file mode 100644 index 0000000000000..3a90e6b0bc791 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view is not found */ +@ExperimentalApi +public class ViewNotFoundException extends ResourceNotFoundException { + + public ViewNotFoundException(final String viewName) { + super("View [{}] does not exist", viewName); + } + + public ViewNotFoundException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java new file mode 100644 index 0000000000000..294f88decba1f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java @@ -0,0 +1,178 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.ActionListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeSet; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** Service to interact with views, create, retrieve, update, and delete */ +@ExperimentalApi +public class ViewService { + + private final static Logger LOG = LogManager.getLogger(ViewService.class); + private final ClusterService clusterService; + private final NodeClient client; + private final LongSupplier timeProvider; + + public ViewService(final ClusterService clusterService, final NodeClient client, final LongSupplier timeProvider) { + this.clusterService = clusterService; + this.client = client; + this.timeProvider = Optional.ofNullable(timeProvider).orElse(System::currentTimeMillis); + } + + public void createView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final long currentTime = timeProvider.getAsLong(); + + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View view = new View(request.getName(), request.getDescription(), currentTime, currentTime, new TreeSet<>(targets)); + + createOrUpdateView(Operation.CreateView, view, listener); + } + + public void updateView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View originalView = getViewOrThrowException(request.getName()); + + final long currentTime = timeProvider.getAsLong(); + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View updatedView = new View( + request.getName(), + request.getDescription(), + originalView.getCreatedAt(), + currentTime, + new TreeSet<>(targets) + ); + + createOrUpdateView(Operation.UpdateView, updatedView, listener); + } + + public void deleteView(final DeleteViewAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + getViewOrThrowException(request.getName()); + + clusterService.submitStateUpdateTask("delete_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + return new ClusterState.Builder(clusterService.state()).metadata( + Metadata.builder(currentState.metadata()).removeView(request.getName()) + ).build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to delete view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + 
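+ // Only acknowledge the deletion once the updated cluster state (with the view removed) has been applied.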
listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } + + public void getView(final GetViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View view = getViewOrThrowException(request.getName()); + + listener.onResponse(new GetViewAction.Response(view)); + } + + public void listViewNames(final ActionListener<ListViewNamesAction.Response> listener) { + final List<String> viewNames = new ArrayList<>( + Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(Map::keySet) + .orElseThrow() + ); + + listener.onResponse(new ListViewNamesAction.Response(viewNames)); + } + + public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) { + final View view = getViewOrThrowException(request.getView()); + + final String[] indices = view.getTargets().stream().map(View.Target::getIndexPattern).toArray(String[]::new); + request.indices(indices); + + client.executeLocally(SearchAction.INSTANCE, request, listener); + } + + View getViewOrThrowException(final String viewName) { + return Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(views -> views.get(viewName)) + .orElseThrow(() -> new ViewNotFoundException(viewName)); + } + + private enum Operation { + CreateView("create", false), + UpdateView("update", true); + + private final String name; + private final boolean allowOverriding; + + Operation(final String name, final boolean allowOverriding) { + this.name = name; + this.allowOverriding = allowOverriding; + } + } + + private void createOrUpdateView(final Operation operation, final View view, final ActionListener<GetViewAction.Response> listener) { + clusterService.submitStateUpdateTask(operation.name + "_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + if (!operation.allowOverriding && currentState.metadata().views().containsKey(view.getName())) { + throw new ViewAlreadyExistsException(view.getName()); + } + return new ClusterState.Builder(clusterService.state()).metadata(Metadata.builder(currentState.metadata()).put(view)) + .build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to " + operation.name + " view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + final View createdView = newState.getMetadata().views().get(view.getName()); + final GetViewAction.Response response = new GetViewAction.Response(createdView); + listener.onResponse(response); + } + }); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java new file mode 100644 index 0000000000000..db0556b1bf334 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** View transport handlers. 
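The APIs in this package are experimental ({@code @ExperimentalApi}) and may change in future releases.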
*/ +package org.opensearch.action.admin.indices.view; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index b15c69a41972f..9ec41fdca585d 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -583,4 +583,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + return "MultiGetRequest{" + + "preference='" + + preference + + '\'' + + ", realtime=" + + realtime + + ", refresh=" + + refresh + + ", items=" + + items + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 5b41c2a13b596..0520a4a7aecec 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -58,6 +58,10 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.ArrayDeque; @@ -116,8 +120,10 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final Map<String, PendingExecutions> pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; private final SearchRequestContext searchRequestContext; + private final Tracer tracer; private SearchPhase currentPhase; + private boolean currentPhaseHasLifecycle; private final List<Releasable> releasables = new ArrayList<>(); @@ -139,7 +145,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten SearchPhaseResults<Result> resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { super(name); final List<SearchShardIterator> toSkipIterators = new ArrayList<>(); @@ -176,6 +183,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten this.results = resultConsumer; this.clusters = clusters; this.searchRequestContext = searchRequestContext; + this.tracer = tracer; } @Override @@ -220,6 +228,7 @@ public final void start() { null ) ); + onRequestEnd(searchRequestContext); return; } executePhase(this); @@ -285,6 +294,7 @@ private void performPhaseOnShard(final int shardIndex, final SearchShardIterator Runnable r = () -> { final Thread thread = Thread.currentThread(); try { + final SearchPhase phase = this; executePhaseOnShard(shardIt, shard, new SearchActionListener<Result>(shard, shardIndex) { @Override public void innerOnResponse(Result result) { @@ -298,7 +308,12 @@ public void innerOnResponse(Result result) { @Override public void onFailure(Exception t) { try { - onShardFailure(shardIndex, shard, shardIt, t); + // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception. 
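+ // At that point every shard operation has already been counted in totalOps, so the error is + // surfaced as a failure of the whole phase rather than being recorded as one more shard failure.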
+ if (totalOps.get() == expectedTotalOps) { + onPhaseFailure(phase, "The phase has failed", t); + } else { + onShardFailure(shardIndex, shard, shardIt, t); + } } finally { executeNext(pendingExecutions, thread); } @@ -436,12 +451,16 @@ private void onPhaseEnd(SearchRequestContext searchRequestContext) { long tookInNanos = System.nanoTime() - getCurrentPhase().getStartTimeInNanos(); searchRequestContext.updatePhaseTookMap(getCurrentPhase().getName(), TimeUnit.NANOSECONDS.toMillis(tookInNanos)); } - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); + } } private void onPhaseStart(SearchPhase phase) { setCurrentPhase(phase); - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + } } private void onRequestEnd(SearchRequestContext searchRequestContext) { @@ -449,14 +468,24 @@ private void onRequestEnd(SearchRequestContext searchRequestContext) { } private void executePhase(SearchPhase phase) { - try { + Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]")); + try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) { onPhaseStart(phase); phase.recordAndRun(); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } + + if (currentPhaseHasLifecycle == false) { + phaseSpan.setError(e); + } + + onPhaseFailure(phase, "", e); + } finally { + if (currentPhaseHasLifecycle == false) { + phaseSpan.endSpan(); + } } } @@ -635,6 +664,12 @@ public SearchPhase getCurrentPhase() { private void setCurrentPhase(SearchPhase phase) { currentPhase = phase; + // The WrappingSearchAsyncActionPhase (see CanMatchPreFilterSearchPhase for one example) is a special case + // of search phase that wraps a SearchAsyncActionPhase as a SearchPhase. The AbstractSearchAsyncAction manages its own + // onPhaseStart / onPhaseFailure / onPhaseDone callbacks, and the wrapping SearchPhase is abandoned + // (for example, its onPhaseEnd callbacks are never called). To account for that, we do not send any notifications for this + // phase.
+ currentPhaseHasLifecycle = ((phase instanceof WrappingSearchAsyncActionPhase) == false); } @Override @@ -714,7 +749,9 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause); + } raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index c693eea4a2c33..952d83b9e4539 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -44,6 +44,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Comparator; @@ -91,7 +92,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<CanMa SearchTask task, Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + Tracer tracer ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -112,7 +114,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<CanMa new CanMatchSearchPhaseResults(shardsIts.size()), shardsIts.size(), clusters, - searchRequestContext + searchRequestContext, + tracer ); this.phaseFactory = phaseFactory; this.shardsIts = shardsIts; diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index 5f46e0c298de4..5b887b48f696e 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -398,4 +398,16 @@ public boolean shouldCancelChildrenOnCancellation() { } }; } + + @Override + public String toString() { + return "MultiSearchRequest{" + + "maxConcurrentSearchRequests=" + + maxConcurrentSearchRequests + + ", requests=" + + requests + + ", indicesOptions=" + + indicesOptions + + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java index a92961cdc3fd9..903b7dfce09c0 100644 --- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java @@ -82,7 +82,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener<AcknowledgedResponse> listener ) throws Exception { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().clear().addMetric(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()); 
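+ // clear() drops the default metric set, so only the search_pipelines metric is fetched from + // each node instead of the full node info payload.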
client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> { Map<DiscoveryNode, SearchPipelineInfo> searchPipelineInfos = new HashMap<>(); for (NodeInfo nodeInfo : nodeInfos.getNodes()) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index cad092f884f53..87c5dd034a17e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -41,6 +41,7 @@ import org.opensearch.search.dfs.AggregatedDfs; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.internal.AliasFilter; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.List; @@ -77,7 +78,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction final ClusterState clusterState, final SearchTask task, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + final Tracer tracer ) { super( SearchPhaseName.DFS_PRE_QUERY.getName(), @@ -97,7 +99,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java index bbb883809b41b..a8a7e352b89dc 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -8,39 +8,14 @@ package org.opensearch.action.search; -import org.opensearch.index.query.BoolQueryBuilder; -import org.opensearch.index.query.BoostingQueryBuilder; -import org.opensearch.index.query.ConstantScoreQueryBuilder; -import org.opensearch.index.query.DisMaxQueryBuilder; -import org.opensearch.index.query.DistanceFeatureQueryBuilder; -import org.opensearch.index.query.ExistsQueryBuilder; -import org.opensearch.index.query.FieldMaskingSpanQueryBuilder; -import org.opensearch.index.query.FuzzyQueryBuilder; -import org.opensearch.index.query.GeoBoundingBoxQueryBuilder; -import org.opensearch.index.query.GeoDistanceQueryBuilder; -import org.opensearch.index.query.GeoPolygonQueryBuilder; -import org.opensearch.index.query.GeoShapeQueryBuilder; -import org.opensearch.index.query.IntervalQueryBuilder; -import org.opensearch.index.query.MatchAllQueryBuilder; -import org.opensearch.index.query.MatchPhraseQueryBuilder; -import org.opensearch.index.query.MatchQueryBuilder; -import org.opensearch.index.query.MultiMatchQueryBuilder; -import org.opensearch.index.query.PrefixQueryBuilder; import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryStringQueryBuilder; -import org.opensearch.index.query.RangeQueryBuilder; -import org.opensearch.index.query.RegexpQueryBuilder; -import org.opensearch.index.query.ScriptQueryBuilder; -import org.opensearch.index.query.SimpleQueryStringBuilder; -import org.opensearch.index.query.TermQueryBuilder; -import 
org.opensearch.index.query.WildcardQueryBuilder; -import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * Class contains all the Counters related to search query types. @@ -49,244 +24,47 @@ final class SearchQueryCounters { private static final String LEVEL_TAG = "level"; private static final String UNIT = "1"; private final MetricsRegistry metricsRegistry; - - // Counters related to Query types public final Counter aggCounter; - public final Counter boolCounter; - public final Counter boostingCounter; - public final Counter constantScoreCounter; - public final Counter disMaxCounter; - public final Counter distanceFeatureCounter; - public final Counter existsCounter; - public final Counter fieldMaskingSpanCounter; - public final Counter functionScoreCounter; - public final Counter fuzzyCounter; - public final Counter geoBoundingBoxCounter; - public final Counter geoDistanceCounter; - public final Counter geoPolygonCounter; - public final Counter geoShapeCounter; - public final Counter intervalCounter; - public final Counter matchCounter; - public final Counter matchallCounter; - public final Counter matchPhrasePrefixCounter; - public final Counter multiMatchCounter; public final Counter otherQueryCounter; - public final Counter prefixCounter; - public final Counter queryStringCounter; - public final Counter rangeCounter; - public final Counter regexpCounter; - public final Counter scriptCounter; - public final Counter simpleQueryStringCounter; public final Counter sortCounter; - public final Counter skippedCounter; - public final Counter termCounter; - public final Counter totalCounter; - public final Counter wildcardCounter; - public final Counter numberOfInputFieldsCounter; private final Map<Class<? 
extends QueryBuilder>, Counter> queryHandlers; + public final ConcurrentHashMap<String, Counter> nameToQueryTypeCounters; public SearchQueryCounters(MetricsRegistry metricsRegistry) { this.metricsRegistry = metricsRegistry; + this.nameToQueryTypeCounters = new ConcurrentHashMap<>(); this.aggCounter = metricsRegistry.createCounter( "search.query.type.agg.count", "Counter for the number of top level agg search queries", UNIT ); - this.boolCounter = metricsRegistry.createCounter( - "search.query.type.bool.count", - "Counter for the number of top level and nested bool search queries", - UNIT - ); - this.boostingCounter = metricsRegistry.createCounter( - "search.query.type.boost.count", - "Counter for the number of top level and nested boost search queries", - UNIT - ); - this.constantScoreCounter = metricsRegistry.createCounter( - "search.query.type.counstantscore.count", - "Counter for the number of top level and nested constant score search queries", - UNIT - ); - this.disMaxCounter = metricsRegistry.createCounter( - "search.query.type.dismax.count", - "Counter for the number of top level and nested disjuntion max search queries", - UNIT - ); - this.distanceFeatureCounter = metricsRegistry.createCounter( - "search.query.type.distancefeature.count", - "Counter for the number of top level and nested distance feature search queries", - UNIT - ); - this.existsCounter = metricsRegistry.createCounter( - "search.query.type.exists.count", - "Counter for the number of top level and nested exists search queries", - UNIT - ); - this.fieldMaskingSpanCounter = metricsRegistry.createCounter( - "search.query.type.fieldmaskingspan.count", - "Counter for the number of top level and nested field masking span search queries", - UNIT - ); - this.functionScoreCounter = metricsRegistry.createCounter( - "search.query.type.functionscore.count", - "Counter for the number of top level and nested function score search queries", - UNIT - ); - this.fuzzyCounter = metricsRegistry.createCounter( - "search.query.type.fuzzy.count", - "Counter for the number of top level and nested fuzzy search queries", - UNIT - ); - this.geoBoundingBoxCounter = metricsRegistry.createCounter( - "search.query.type.geoboundingbox.count", - "Counter for the number of top level and nested geo bounding box queries", - UNIT - ); - this.geoDistanceCounter = metricsRegistry.createCounter( - "search.query.type.geodistance.count", - "Counter for the number of top level and nested geo distance queries", - UNIT - ); - this.geoPolygonCounter = metricsRegistry.createCounter( - "search.query.type.geopolygon.count", - "Counter for the number of top level and nested geo polygon queries", - UNIT - ); - this.geoShapeCounter = metricsRegistry.createCounter( - "search.query.type.geoshape.count", - "Counter for the number of top level and nested geo shape queries", - UNIT - ); - this.intervalCounter = metricsRegistry.createCounter( - "search.query.type.interval.count", - "Counter for the number of top level and nested interval queries", - UNIT - ); - this.matchCounter = metricsRegistry.createCounter( - "search.query.type.match.count", - "Counter for the number of top level and nested match search queries", - UNIT - ); - this.matchallCounter = metricsRegistry.createCounter( - "search.query.type.matchall.count", - "Counter for the number of top level and nested match all search queries", - UNIT - ); - this.matchPhrasePrefixCounter = metricsRegistry.createCounter( - "search.query.type.matchphrase.count", - "Counter for the number of top level and nested match 
phrase prefix search queries", - UNIT - ); - this.multiMatchCounter = metricsRegistry.createCounter( - "search.query.type.multimatch.count", - "Counter for the number of top level and nested multi match search queries", - UNIT - ); this.otherQueryCounter = metricsRegistry.createCounter( "search.query.type.other.count", "Counter for the number of top level and nested search queries that do not match any other categories", UNIT ); - this.prefixCounter = metricsRegistry.createCounter( - "search.query.type.prefix.count", - "Counter for the number of top level and nested search queries that match prefix queries", - UNIT - ); - this.queryStringCounter = metricsRegistry.createCounter( - "search.query.type.querystringquery.count", - "Counter for the number of top level and nested queryStringQuery search queries", - UNIT - ); - this.rangeCounter = metricsRegistry.createCounter( - "search.query.type.range.count", - "Counter for the number of top level and nested range search queries", - UNIT - ); - this.regexpCounter = metricsRegistry.createCounter( - "search.query.type.regex.count", - "Counter for the number of top level and nested regex search queries", - UNIT - ); - this.scriptCounter = metricsRegistry.createCounter( - "search.query.type.script.count", - "Counter for the number of top level and nested script search queries", - UNIT - ); - this.simpleQueryStringCounter = metricsRegistry.createCounter( - "search.query.type.simplequerystring.count", - "Counter for the number of top level and nested script simple query string search queries", - UNIT - ); - this.skippedCounter = metricsRegistry.createCounter( - "search.query.type.skipped.count", - "Counter for the number queries skipped due to error", - UNIT - ); this.sortCounter = metricsRegistry.createCounter( "search.query.type.sort.count", "Counter for the number of top level sort search queries", UNIT ); - this.termCounter = metricsRegistry.createCounter( - "search.query.type.term.count", - "Counter for the number of top level and nested term search queries", - UNIT - ); - this.totalCounter = metricsRegistry.createCounter( - "search.query.type.total.count", - "Counter for the number of top level and nested search queries", - UNIT - ); - this.wildcardCounter = metricsRegistry.createCounter( - "search.query.type.wildcard.count", - "Counter for the number of top level and nested wildcard search queries", - UNIT - ); - this.numberOfInputFieldsCounter = metricsRegistry.createCounter( - "search.query.type.numberofinputfields.count", - "Counter for the number of input fields in the search queries", - UNIT - ); this.queryHandlers = new HashMap<>(); - initializeQueryHandlers(); + } public void incrementCounter(QueryBuilder queryBuilder, int level) { - Counter counter = queryHandlers.get(queryBuilder.getClass()); - if (counter != null) { - counter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else { - otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } - } + String uniqueQueryCounterName = queryBuilder.getName(); - private void initializeQueryHandlers() { + Counter counter = nameToQueryTypeCounters.computeIfAbsent(uniqueQueryCounterName, k -> createQueryCounter(k)); + counter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } - queryHandlers.put(BoolQueryBuilder.class, boolCounter); - queryHandlers.put(FunctionScoreQueryBuilder.class, functionScoreCounter); - queryHandlers.put(MatchQueryBuilder.class, matchCounter); - queryHandlers.put(MatchPhraseQueryBuilder.class, matchPhrasePrefixCounter); - 
queryHandlers.put(MultiMatchQueryBuilder.class, multiMatchCounter); - queryHandlers.put(QueryStringQueryBuilder.class, queryStringCounter); - queryHandlers.put(RangeQueryBuilder.class, rangeCounter); - queryHandlers.put(RegexpQueryBuilder.class, regexpCounter); - queryHandlers.put(TermQueryBuilder.class, termCounter); - queryHandlers.put(WildcardQueryBuilder.class, wildcardCounter); - queryHandlers.put(BoostingQueryBuilder.class, boostingCounter); - queryHandlers.put(ConstantScoreQueryBuilder.class, constantScoreCounter); - queryHandlers.put(DisMaxQueryBuilder.class, disMaxCounter); - queryHandlers.put(DistanceFeatureQueryBuilder.class, distanceFeatureCounter); - queryHandlers.put(ExistsQueryBuilder.class, existsCounter); - queryHandlers.put(FieldMaskingSpanQueryBuilder.class, fieldMaskingSpanCounter); - queryHandlers.put(FuzzyQueryBuilder.class, fuzzyCounter); - queryHandlers.put(GeoBoundingBoxQueryBuilder.class, geoBoundingBoxCounter); - queryHandlers.put(GeoDistanceQueryBuilder.class, geoDistanceCounter); - queryHandlers.put(GeoPolygonQueryBuilder.class, geoPolygonCounter); - queryHandlers.put(GeoShapeQueryBuilder.class, geoShapeCounter); - queryHandlers.put(IntervalQueryBuilder.class, intervalCounter); - queryHandlers.put(MatchAllQueryBuilder.class, matchallCounter); - queryHandlers.put(PrefixQueryBuilder.class, prefixCounter); - queryHandlers.put(ScriptQueryBuilder.class, scriptCounter); - queryHandlers.put(SimpleQueryStringBuilder.class, simpleQueryStringCounter); + private Counter createQueryCounter(String counterName) { + Counter counter = metricsRegistry.createCounter( + "search.query.type." + counterName + ".count", + "Counter for the number of top level and nested " + counterName + " search queries", + UNIT + ); + return counter; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index c26bd5eef8c15..c8ab5fdaf61a1 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -43,6 +43,7 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Map; @@ -82,7 +83,8 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh ClusterState clusterState, SearchTask task, SearchResponse.Clusters clusters, - SearchRequestContext searchRequestContext + SearchRequestContext searchRequestContext, + final Tracer tracer ) { super( SearchPhaseName.QUERY.getName(), @@ -102,7 +104,8 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh resultConsumer, request.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ); this.topDocsSize = SearchPhaseController.getTopDocsSize(request); this.trackTotalHitsUpTo = request.resolveTrackTotalHitsUpTo(); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 96cea17ff4972..f738c182c06da 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -359,7 +359,7 @@ boolean 
isFinalReduce() { * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. */ - long getOrCreateAbsoluteStartMillis() { + public long getOrCreateAbsoluteStartMillis() { return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index e949c5e0bea29..9dac827e7d518 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -406,6 +406,15 @@ public SearchRequestBuilder setTrackScores(boolean trackScores) { return this; } + /** + * Applies when fetching scores with named queries, and controls if scores will be tracked as well. + * Defaults to {@code false}. + */ + public SearchRequestBuilder setIncludeNamedQueriesScore(boolean includeNamedQueriesScore) { + sourceBuilder().includeNamedQueriesScores(includeNamedQueriesScore); + return this; + } + /** * Indicates if the total hit count for the query should be tracked. Requests will count total hit count accurately * up to 10,000 by default, see {@link #setTrackTotalHitsUpTo(int)} to change this value or set to true/false to always/never diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java index eceac7204b196..b8bbde65ca6bc 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -22,7 +22,7 @@ * @opensearch.internal */ @InternalApi -class SearchRequestContext { +public class SearchRequestContext { private final SearchRequestOperationsListener searchRequestOperationsListener; private long absoluteStartNanos; private final Map<String, Long> phaseTookMap; @@ -47,7 +47,7 @@ void updatePhaseTookMap(String phaseName, Long tookTime) { this.phaseTookMap.put(phaseName, tookTime); } - Map<String, Long> phaseTookMap() { + public Map<String, Long> phaseTookMap() { return phaseTookMap; } @@ -70,7 +70,7 @@ void setAbsoluteStartNanos(long absoluteStartNanos) { /** * Request start time in nanos */ - long getAbsoluteStartNanos() { + public long getAbsoluteStartNanos() { return absoluteStartNanos; } @@ -78,7 +78,7 @@ void setTotalHits(TotalHits totalHits) { this.totalHits = totalHits; } - TotalHits totalHits() { + public TotalHits totalHits() { return totalHits; } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 2a09cc084f79f..53efade174502 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -22,6 +22,16 @@ @InternalApi public abstract class SearchRequestOperationsListener { private volatile boolean enabled; + public static final SearchRequestOperationsListener NOOP = new SearchRequestOperationsListener(false) { + @Override + protected void onPhaseStart(SearchPhaseContext context) {} + + @Override + protected void onPhaseEnd(SearchPhaseContext context, 
SearchRequestContext searchRequestContext) {} + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} + }; protected SearchRequestOperationsListener() { this.enabled = true; @@ -31,21 +41,21 @@ protected SearchRequestOperationsListener(final boolean enabled) { this.enabled = enabled; } - abstract void onPhaseStart(SearchPhaseContext context); + protected abstract void onPhaseStart(SearchPhaseContext context); - abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); + protected abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); - abstract void onPhaseFailure(SearchPhaseContext context); + protected abstract void onPhaseFailure(SearchPhaseContext context, Throwable cause); - void onRequestStart(SearchRequestContext searchRequestContext) {} + protected void onRequestStart(SearchRequestContext searchRequestContext) {} - void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} - boolean isEnabled(SearchRequest searchRequest) { + protected boolean isEnabled(SearchRequest searchRequest) { return isEnabled(); } - boolean isEnabled() { + protected boolean isEnabled() { return enabled; } @@ -69,7 +79,7 @@ static final class CompositeListener extends SearchRequestOperationsListener { } @Override - void onPhaseStart(SearchPhaseContext context) { + protected void onPhaseStart(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseStart(context); @@ -80,7 +90,7 @@ void onPhaseStart(SearchPhaseContext context) { } @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseEnd(context, searchRequestContext); @@ -91,10 +101,10 @@ void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestCo } @Override - void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { for (SearchRequestOperationsListener listener : listeners) { try { - listener.onPhaseFailure(context); + listener.onPhaseFailure(context, cause); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); } @@ -102,7 +112,7 @@ void onPhaseFailure(SearchPhaseContext context) { } @Override - void onRequestStart(SearchRequestContext searchRequestContext) { + protected void onRequestStart(SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onRequestStart(searchRequestContext); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index 7f25f9026f215..a9a07c6aca7f4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -134,19 +134,19 @@ public SearchRequestSlowLog(ClusterService clusterService) { } @Override - void onPhaseStart(SearchPhaseContext context) {} + protected void onPhaseStart(SearchPhaseContext context) {} @Override - void onPhaseEnd(SearchPhaseContext 
context, SearchRequestContext searchRequestContext) {} + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} @Override - void onRequestStart(SearchRequestContext searchRequestContext) {} + protected void onRequestStart(SearchRequestContext searchRequestContext) {} @Override - void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 88d599a0dcdaa..97ef94055faf7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -58,12 +58,12 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { } @Override - void onPhaseStart(SearchPhaseContext context) { + protected void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); } @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); phaseStats.current.dec(); phaseStats.total.inc(); @@ -71,7 +71,7 @@ void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestCo } @Override - void onPhaseFailure(SearchPhaseContext context) { + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 842c10b700d24..65cfd35489033 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -88,6 +88,12 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; +import org.opensearch.telemetry.tracing.listener.TraceableSearchRequestOperationsListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -173,6 +179,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, private final CircuitBreaker circuitBreaker; private final SearchPipelineService searchPipelineService; private final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory; + private final 
Tracer tracer; private volatile boolean searchQueryMetricsEnabled; @@ -195,7 +202,8 @@ public TransportSearchAction( NamedWriteableRegistry namedWriteableRegistry, SearchPipelineService searchPipelineService, MetricsRegistry metricsRegistry, - SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory + SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory, + Tracer tracer ) { super(SearchAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchRequest>) SearchRequest::new); this.client = client; @@ -215,6 +223,7 @@ public TransportSearchAction( this.searchRequestOperationsCompositeListenerFactory = searchRequestOperationsCompositeListenerFactory; clusterService.getClusterSettings() .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + this.tracer = tracer; } private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { @@ -384,7 +393,8 @@ public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), clusters, - searchRequestContext + searchRequestContext, + tracer ) { @Override protected void executePhaseOnShard( @@ -431,49 +441,58 @@ private void executeRequest( if (originalSearchRequest.isPhaseTook() == null) { originalSearchRequest.setPhaseTook(clusterService.getClusterSettings().get(SEARCH_PHASE_TOOK_ENABLED)); } - SearchRequestOperationsListener.CompositeListener requestOperationsListeners = searchRequestOperationsCompositeListenerFactory - .buildCompositeListener(originalSearchRequest, logger); - SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); - searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); - - PipelinedRequest searchRequest; - ActionListener<SearchResponse> listener; - try { - searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = searchRequest.transformResponseListener(originalListener); - } catch (Exception e) { - originalListener.onFailure(e); - return; - } - ActionListener<SearchRequest> requestTransformListener = ActionListener.wrap(sr -> { - if (searchQueryMetricsEnabled) { - try { - searchQueryCategorizer.categorize(sr.source()); - } catch (Exception e) { - logger.error("Error while trying to categorize the query.", e); - } + final Span requestSpan = tracer.startSpan(SpanBuilder.from(task, actionName)); + try (final SpanScope spanScope = tracer.withSpanInScope(requestSpan)) { + SearchRequestOperationsListener.CompositeListener requestOperationsListeners; + final ActionListener<SearchResponse> updatedListener = TraceableActionListener.create(originalListener, requestSpan, tracer); + requestOperationsListeners = searchRequestOperationsCompositeListenerFactory.buildCompositeListener( + originalSearchRequest, + logger, + TraceableSearchRequestOperationsListener.create(tracer, requestSpan) + ); + SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + + PipelinedRequest searchRequest; + ActionListener<SearchResponse> listener; + try { + searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); + listener = searchRequest.transformResponseListener(updatedListener); + } catch 
(Exception e) {
+                updatedListener.onFailure(e);
+                return;
+            }
-        ActionListener<SearchSourceBuilder> rewriteListener = buildRewriteListener(
-            sr,
-            task,
-            timeProvider,
-            searchAsyncActionProvider,
-            listener,
-            searchRequestContext
-        );
-        if (sr.source() == null) {
-            rewriteListener.onResponse(sr.source());
-        } else {
-            Rewriteable.rewriteAndFetch(
-                sr.source(),
-                searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis),
-                rewriteListener
+            ActionListener<SearchRequest> requestTransformListener = ActionListener.wrap(sr -> {
+                if (searchQueryMetricsEnabled) {
+                    try {
+                        searchQueryCategorizer.categorize(sr.source());
+                    } catch (Exception e) {
+                        logger.error("Error while trying to categorize the query.", e);
+                    }
+                }
+
+                ActionListener<SearchSourceBuilder> rewriteListener = buildRewriteListener(
+                    sr,
+                    task,
+                    timeProvider,
+                    searchAsyncActionProvider,
+                    listener,
+                    searchRequestContext
                 );
-            }
-        }, listener::onFailure);
-        searchRequest.transformRequest(requestTransformListener);
+                if (sr.source() == null) {
+                    rewriteListener.onResponse(sr.source());
+                } else {
+                    Rewriteable.rewriteAndFetch(
+                        sr.source(),
+                        searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis),
+                        rewriteListener
+                    );
+                }
+            }, listener::onFailure);
+            searchRequest.transformRequest(requestTransformListener);
+        }
     }

     private ActionListener<SearchSourceBuilder> buildRewriteListener(
@@ -1220,8 +1239,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction
             timeProvider,
             clusterState,
             task,
-            (iter) -> {
-                AbstractSearchAsyncAction<? extends SearchPhaseResult> action = searchAsyncAction(
+            (iter) -> new WrappingSearchAsyncActionPhase(
+                searchAsyncAction(
                     task,
                     searchRequest,
                     executor,
@@ -1237,16 +1256,11 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction
                     threadPool,
                     clusters,
                     searchRequestContext
-                );
-                return new SearchPhase(action.getName()) {
-                    @Override
-                    public void run() {
-                        action.start();
-                    }
-                };
-            },
+                )
+            ),
             clusters,
-            searchRequestContext
+            searchRequestContext,
+            tracer
         );
     } else {
         final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults(
@@ -1277,7 +1291,8 @@ public void run() {
                     clusterState,
                     task,
                     clusters,
-                    searchRequestContext
+                    searchRequestContext,
+                    tracer
                 );
                 break;
             case QUERY_THEN_FETCH:
@@ -1298,7 +1313,8 @@ public void run() {
                     clusterState,
                     task,
                     clusters,
-                    searchRequestContext
+                    searchRequestContext,
+                    tracer
                 );
                 break;
             default:
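A note on the executeRequest() rewrite above before the next file: the whole search request flow now runs inside a request-level trace span. Reduced to its essentials, the pattern looks roughly like the sketch below. It is a condensed restatement, not part of the patch; it reuses only types this patch already imports (Tracer, Span, SpanBuilder, SpanScope, TraceableActionListener) and elides pipeline resolution and error handling.

```java
// Condensed sketch of the tracing pattern above (not part of the patch):
// start a request span, keep it in scope while dispatching, and let the
// traceable listener end the span when the response or failure arrives.
final Span requestSpan = tracer.startSpan(SpanBuilder.from(task, actionName));
try (final SpanScope spanScope = tracer.withSpanInScope(requestSpan)) {
    // Completing this listener, on success or failure, also closes requestSpan.
    final ActionListener<SearchResponse> traced = TraceableActionListener.create(originalListener, requestSpan, tracer);
    // ... resolve the search pipeline and start the async action with `traced` ...
}
```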
diff --git a/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java
new file mode 100644
index 0000000000000..3c1ad52a1fe6a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.search.SearchPhaseResult;
+
+/**
+ * The WrappingSearchAsyncActionPhase (see {@link CanMatchPreFilterSearchPhase} for one example) is a special case
+ * of search phase that wraps a SearchAsyncActionPhase as a {@link SearchPhase}. The {@link AbstractSearchAsyncAction}
+ * manages its own onPhaseStart / onPhaseFailure / onPhaseDone callbacks, and simply wrapping it in a plain SearchPhase
+ * would cause only some of those callbacks to be invoked. For that reason, {@link AbstractSearchAsyncAction} treats
+ * {@link WrappingSearchAsyncActionPhase} specially.
+ */
+class WrappingSearchAsyncActionPhase extends SearchPhase {
+    private final AbstractSearchAsyncAction<? extends SearchPhaseResult> action;
+
+    protected WrappingSearchAsyncActionPhase(AbstractSearchAsyncAction<? extends SearchPhaseResult> action) {
+        super(action.getName());
+        this.action = action;
+    }
+
+    @Override
+    public void run() {
+        action.start();
+    }
+
+    SearchPhase getSearchPhase() {
+        return action;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
index e43c42446de2c..485dd43a5999c 100644
--- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
+++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
@@ -718,6 +718,7 @@ public final BootstrapCheckResult check(BootstrapContext context) {
             return BootstrapCheckResult.success();
         }

+        @SuppressWarnings("removal")
         boolean isAllPermissionGranted() {
             final SecurityManager sm = System.getSecurityManager();
             assert sm != null;
diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java
index 0aa965ce46096..52dd5d710eedc 100644
--- a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java
+++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java
@@ -128,6 +128,7 @@ public Object remove(Object key) {
     /**
      * Returns a read-only view of all system properties
      */
+    @SuppressWarnings("removal")
    public static Dictionary<Object, Object> getSystemProperties() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
index 4d36efff0e192..8eb4f841b9671 100644
--- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
+++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
@@ -83,6 +83,7 @@ class OpenSearch extends EnvironmentAwareCommand {
     /**
      * Main entry point for starting opensearch
      */
+    @SuppressWarnings("removal")
    public static void main(final String[] args) throws Exception {
        overrideDnsCachePolicyProperties();
        /*
diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java
index 14435db64274c..4571eb35ca93c 100644
--- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java
+++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java
@@ -53,6 +53,7 @@
  *
  * @opensearch.internal
  **/
+@SuppressWarnings("removal")
 final class OpenSearchPolicy extends Policy {

     /** template policy file, the one used in tests */
diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java
index 2b28260097ce1..5f9a01436b4cb 100644
--- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java
+++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java
@@ -98,6 +98,7 @@ void onNonFatalUncaught(final String threadName, final Throwable t) {
         Terminal.DEFAULT.flush();
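The `@SuppressWarnings("removal")` annotations added to the bootstrap classes above (and to `halt` just below) acknowledge that `SecurityManager` and `AccessController` are deprecated for removal in recent JDKs. A minimal sketch of the kind of privileged call being suppressed, using only standard JDK APIs; the helper class and method names here are illustrative, not from this patch:

```java
import java.security.AccessController;
import java.security.PrivilegedAction;

final class HaltSketch {
    // Mirrors the PrivilegedHaltAction pattern in OpenSearchUncaughtExceptionHandler:
    // both AccessController and SecurityManager are deprecated for removal, so the
    // caller suppresses the "removal" warning rather than migrating immediately.
    @SuppressWarnings("removal")
    static void halt(final int status) {
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            Runtime.getRuntime().halt(status);
            return null;
        });
    }
}
```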
} + @SuppressWarnings("removal") void halt(int status) { AccessController.doPrivileged(new PrivilegedHaltAction(status)); } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 749c146de4f16..53b1d990f9a0c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -66,6 +66,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; @@ -119,7 +121,10 @@ * * @opensearch.internal */ +@SuppressWarnings("removal") final class Security { + private static final Pattern CODEBASE_JAR_WITH_CLASSIFIER = Pattern.compile("^(.+)-\\d+\\.\\d+[^-]*.*?[-]?([^-]+)?\\.jar$"); + /** no instantiation */ private Security() {} @@ -230,33 +235,45 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) { try { List<String> propertiesSet = new ArrayList<>(); try { + final Map<Map.Entry<String, URL>, String> jarsWithPossibleClassifiers = new HashMap<>(); // set codebase properties for (Map.Entry<String, URL> codebase : codebases.entrySet()) { - String name = codebase.getKey(); - URL url = codebase.getValue(); + final String name = codebase.getKey(); + final URL url = codebase.getValue(); // We attempt to use a versionless identifier for each codebase. This assumes a specific version // format in the jar filename. While we cannot ensure all jars in all plugins use this format, nonconformity // only means policy grants would need to include the entire jar filename as they always have before. + final Matcher matcher = CODEBASE_JAR_WITH_CLASSIFIER.matcher(name); + if (matcher.matches() && matcher.group(2) != null) { + // There is a JAR that, possibly, has a classifier or SNAPSHOT at the end, examples are: + // - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar + // - kafka-server-common-3.6.1-test.jar + // - lucene-core-9.11.0-snapshot-8a555eb.jar + // - zstd-jni-1.5.5-5.jar + jarsWithPossibleClassifiers.put(codebase, matcher.group(2)); + } else { + String property = "codebase." + name; + String aliasProperty = "codebase." + name.replaceFirst("-\\d+\\.\\d+.*\\.jar", ""); + addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty); + } + } + + // set codebase properties for JARs that might present with classifiers + for (Map.Entry<Map.Entry<String, URL>, String> jarWithPossibleClassifier : jarsWithPossibleClassifiers.entrySet()) { + final Map.Entry<String, URL> codebase = jarWithPossibleClassifier.getKey(); + final String name = codebase.getKey(); + final URL url = codebase.getValue(); + String property = "codebase." + name; String aliasProperty = "codebase." 
+ name.replaceFirst("-\\d+\\.\\d+.*\\.jar", ""); - if (aliasProperty.equals(property) == false) { - propertiesSet.add(aliasProperty); - String previous = System.setProperty(aliasProperty, url.toString()); - if (previous != null) { - throw new IllegalStateException( - "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString() - ); - } - } - propertiesSet.add(property); - String previous = System.setProperty(property, url.toString()); - if (previous != null) { - throw new IllegalStateException( - "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString() - ); + if (System.getProperties().containsKey(aliasProperty)) { + aliasProperty = aliasProperty + "@" + jarWithPossibleClassifier.getValue(); } + + addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty); } + return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI())); } finally { // clear codebase properties @@ -269,6 +286,27 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) { } } + /** adds the codebase to properties and System properties */ + @SuppressForbidden(reason = "accesses System properties to configure codebases") + private static void addCodebaseToSystemProperties(List<String> propertiesSet, final URL url, String property, String aliasProperty) { + if (aliasProperty.equals(property) == false) { + propertiesSet.add(aliasProperty); + String previous = System.setProperty(aliasProperty, url.toString()); + if (previous != null) { + throw new IllegalStateException( + "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString() + ); + } + } + propertiesSet.add(property); + String previous = System.setProperty(property, url.toString()); + if (previous != null) { + throw new IllegalStateException( + "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString() + ); + } + } + /** returns dynamic Permissions to configured paths and bind ports */ static Permissions createPermissions(Environment environment) throws IOException { Permissions policy = new Permissions(); diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index f4ae383249f61..322b435bdf35c 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -34,6 +34,8 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; @@ -466,6 +468,18 @@ public interface Client extends OpenSearchClient, Releasable { */ void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener); + /** Search a view */ + void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener); + + /** Search a view */ + ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request); + + /** List all view names */ + void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener); + + /** List all view names */ + 
ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request); + /** * Returns this clients settings */ diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 20dab1caa36c4..588584cd8a280 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -125,6 +125,9 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; @@ -838,4 +841,28 @@ public interface IndicesAdminClient extends OpenSearchClient { * Resolves names and wildcard expressions to indices, aliases, and data streams */ ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request); + + /** Create a view */ + void createView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Create a view */ + ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request); + + /** Get the details of a view */ + void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Get the details of a view */ + ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request); + + /** Delete a view */ + void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener); + + /** Delete a view */ + ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request); + + /** Update a view */ + void updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Update a view */ + ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request); } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 786bfa38bb19c..6c6049f04231b 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -312,6 +312,12 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -742,6 +748,26 @@ public 
FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) {
         return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices);
     }

+    @Override
+    public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) {
+        execute(SearchViewAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request) {
+        return execute(SearchViewAction.INSTANCE, request);
+    }
+
+    @Override
+    public void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener) {
+        execute(ListViewNamesAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request) {
+        return execute(ListViewNamesAction.INSTANCE, request);
+    }
+
     static class Admin implements AdminClient {

         private final ClusterAdmin clusterAdmin;
@@ -2070,6 +2096,46 @@ public void resolveIndex(ResolveIndexAction.Request request, ActionListener<Reso
         public ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request) {
             return execute(ResolveIndexAction.INSTANCE, request);
         }
+
+        @Override
+        public void createView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener) {
+            execute(CreateViewAction.INSTANCE, request, listener);
+        }
+
+        @Override
+        public ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request) {
+            return execute(CreateViewAction.INSTANCE, request);
+        }
+
+        /** Gets a view */
+        public void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener) {
+            execute(GetViewAction.INSTANCE, request, listener);
+        }
+
+        /** Gets a view */
+        public ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request) {
+            return execute(GetViewAction.INSTANCE, request);
+        }
+
+        /** Deletes a view */
+        public void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener) {
+            execute(DeleteViewAction.INSTANCE, request, listener);
+        }
+
+        /** Deletes a view */
+        public ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request) {
+            return execute(DeleteViewAction.INSTANCE, request);
+        }
+
+        /** Updates a view */
+        public void updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener) {
+            execute(UpdateViewAction.INSTANCE, request, listener);
+        }
+
+        /** Updates a view */
+        public ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request) {
+            return execute(UpdateViewAction.INSTANCE, request);
+        }
     }

     @Override
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
index bad881f8bda76..b846d382db89d 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
@@ -49,6 +49,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService;
 import org.opensearch.cluster.metadata.MetadataUpdateSettingsService;
 import org.opensearch.cluster.metadata.RepositoriesMetadata;
+import org.opensearch.cluster.metadata.ViewMetadata;
 import org.opensearch.cluster.metadata.WeightedRoutingMetadata;
 import org.opensearch.cluster.routing.DelayedAllocationService;
 import org.opensearch.cluster.routing.allocation.AllocationService;
@@ -68,6 +69,7 @@ import
org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.RemoteStoreMigrationAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ResizeAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider; @@ -82,6 +84,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; @@ -195,6 +198,7 @@ public static List<Entry> getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom ); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); + registerMetadataCustom(entries, ViewMetadata.TYPE, ViewMetadata::new, ViewMetadata::readDiffFrom); registerMetadataCustom(entries, WeightedRoutingMetadata.TYPE, WeightedRoutingMetadata::new, WeightedRoutingMetadata::readDiffFrom); registerMetadataCustom( entries, @@ -292,6 +296,7 @@ public static List<NamedXContentRegistry.Entry> getNamedXWriteables() { DataStreamMetadata::fromXContent ) ); + entries.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(ViewMetadata.TYPE), ViewMetadata::fromXContent)); entries.add( new NamedXContentRegistry.Entry( Metadata.Custom.class, @@ -370,6 +375,9 @@ public static Collection<AllocationDecider> createAllocationDeciders( addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new NodeLoadAwareAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new TargetPoolAllocationDecider()); + if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING)) { + addAllocationDecider(deciders, new RemoteStoreMigrationAllocationDecider(settings, clusterSettings)); + } clusterPlugins.stream() .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream()) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index f701a2f52277d..bc365b9872037 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -58,6 +58,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -176,12 +177,13 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - // TODO: We are using one of the existing node to build the repository metadata, this will need to be updated - // once we start supporting mixed compatibility mode. 
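For context on the JoinTaskExecutor hunk that continues below: the node used to seed the repositories metadata is now chosen by preferring a remote-store-capable node when one has already joined. The snippet below is only a condensed restatement of the patch code shown next, reusing its own names (remoteDN, dn, remoteStoreNodeService); it adds no behavior of its own.

```java
// Prefer the first remote-store-capable node as the source of repository
// metadata; fall back to any existing node otherwise (restated from the hunk below).
Optional<DiscoveryNode> remoteDN = currentNodes.getNodes()
    .values()
    .stream()
    .filter(DiscoveryNode::isRemoteStoreNode)
    .findFirst();
DiscoveryNode dn = remoteDN.orElseGet(() -> currentNodes.getNodes().values().stream().findFirst().get());
RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata(
    dn,
    currentState.getMetadata().custom(RepositoriesMetadata.TYPE)
);
```

If the first remote-store node joins only later, the same update is re-run for that joining node, as the `remoteDN.isEmpty()` branch further down shows.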
An optimization can be done as this will get invoked + // An optimization can be done as this will get invoked // for every set of node join task which we can optimize to not compute if cluster state already has // repository information. + Optional<DiscoveryNode> remoteDN = currentNodes.getNodes().values().stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + DiscoveryNode dn = remoteDN.orElseGet(() -> (currentNodes.getNodes().values()).stream().findFirst().get()); RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( - (currentNodes.getNodes().values()).stream().findFirst().get(), + dn, currentState.getMetadata().custom(RepositoriesMetadata.TYPE) ); @@ -212,6 +214,16 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); + + if (remoteDN.isEmpty()) { + // This is hit only on cases where we encounter first remote node + logger.info("Updating system repository now for remote store"); + repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + node, + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + } + nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -495,36 +507,46 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod assert existingNodes.isEmpty() == false; - // TODO: The below check is valid till we don't support migration, once we start supporting migration a remote - // store node will be able to join a non remote store cluster and vice versa. 
#7986 CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = existingNodes.get(0); if (joiningNode.isRemoteStoreNode()) { + ensureRemoteStoreNodesCompatibility(joiningNode, existingNode); + } else { if (existingNode.isRemoteStoreNode()) { - RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); - RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); - if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { - throw new IllegalStateException( - "a remote store node [" - + joiningNode - + "] is trying to join a remote store cluster with incompatible node attributes in " - + "comparison with existing node [" - + existingNode - + "]" - ); - } - } else { throw new IllegalStateException( - "a remote store node [" + joiningNode + "] is trying to join a non remote store cluster" + "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" ); } - } else { - if (existingNode.isRemoteStoreNode()) { + } + } else { + if (remoteStoreCompatibilityMode == CompatibilityMode.MIXED) { + if (joiningNode.isRemoteStoreNode()) { + Optional<DiscoveryNode> remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); + } + } + } + } + + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNode existingNode) { + if (joiningNode.isRemoteStoreNode()) { + if (existingNode.isRemoteStoreNode()) { + RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); + RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); + if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { throw new IllegalStateException( - "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" ); } + } else { + throw new IllegalStateException("a remote store node [" + joiningNode + "] is trying to join a non remote store cluster"); } } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 860b5738ff4ad..d016501dd0910 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -845,6 +845,10 @@ public Map<String, DataStream> dataStreams() { .orElse(Collections.emptyMap()); } + public Map<String, View> views() { + return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -1360,6 +1364,36 @@ public Builder removeDataStream(String name) { return this; } + private Map<String, View> getViews() { + return Optional.ofNullable(customs.get(ViewMetadata.TYPE)) + .map(o -> (ViewMetadata) o) + .map(vmd -> vmd.views()) + .orElse(new HashMap<>()); + } + + public View 
view(final String viewName) { + return getViews().get(viewName); + } + + public Builder views(final Map<String, View> views) { + this.customs.put(ViewMetadata.TYPE, new ViewMetadata(views)); + return this; + } + + public Builder put(final View view) { + Objects.requireNonNull(view, "view cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.put(view.getName(), view); + return views(replacementViews); + } + + public Builder removeView(final String viewName) { + Objects.requireNonNull(viewName, "viewName cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.remove(viewName); + return views(replacementViews); + } + public Custom getCustom(String type) { return customs.get(type); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 3384393d8feaf..acc2f3a294745 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -39,6 +39,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.shrink.ResizeType; @@ -135,8 +136,9 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; /** * Service responsible for submitting create index requests @@ -816,6 +818,16 @@ static Settings aggregateIndexSettings( final Settings.Builder requestSettings = Settings.builder().put(request.settings()); final Settings.Builder indexSettingsBuilder = Settings.builder(); + + // Store type of `remote_snapshot` is intended to be system-managed for searchable snapshot indexes so a special case is needed here + // to prevent a user specifying this value when creating an index + String storeTypeSetting = request.settings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new IllegalArgumentException( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". 
Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ); + } + if (sourceMetadata == null) { final Settings.Builder additionalIndexSettings = Settings.builder(); final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); @@ -959,7 +971,7 @@ private static void updateReplicationStrategy( indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(combinedTemplateSettings); } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings); - } else if (isRemoteStoreAttributePresent(clusterSettings)) { + } else if (isRemoteDataAttributePresent(clusterSettings)) { indexReplicationType = ReplicationType.SEGMENT; } else { indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings); @@ -973,7 +985,7 @@ private static void updateReplicationStrategy( * @param clusterSettings cluster level settings */ private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { - if (isRemoteStoreAttributePresent(clusterSettings)) { + if (isRemoteDataAttributePresent(clusterSettings)) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) .put( SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, @@ -1565,7 +1577,7 @@ public static void validateRefreshIntervalSettings(Settings requestSettings, Clu * @param clusterSettings cluster setting */ static void validateTranslogDurabilitySettings(Settings requestSettings, ClusterSettings clusterSettings, Settings settings) { - if (isRemoteStoreAttributePresent(settings) == false + if (isRemoteDataAttributePresent(settings) == false || IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.exists(requestSettings) == false || clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING) == false) { return; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/View.java b/server/src/main/java/org/opensearch/cluster/metadata/View.java new file mode 100644 index 0000000000000..1b1639bbca945 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/View.java @@ -0,0 +1,205 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** View of data in OpenSearch indices */ +@ExperimentalApi +public class View extends AbstractDiffable<View> implements ToXContentObject { + + private final String name; + private final String description; + private final long createdAt; + private final long modifiedAt; + private final SortedSet<Target> targets; + + public View(final String name, final String description, final Long createdAt, final Long modifiedAt, final Set<Target> targets) { + this.name = Objects.requireNonNull(name, "Name must be provided"); + this.description = description; + this.createdAt = createdAt != null ? createdAt : -1; + this.modifiedAt = modifiedAt != null ? modifiedAt : -1; + this.targets = new TreeSet<>(Objects.requireNonNull(targets, "Targets are required on a view")); + } + + public View(final StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readZLong(), in.readZLong(), new TreeSet<>(in.readList(Target::new))); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public long getCreatedAt() { + return createdAt; + } + + public long getModifiedAt() { + return modifiedAt; + } + + public SortedSet<Target> getTargets() { + return new TreeSet<>(targets); + } + + public static Diff<View> readDiffFrom(final StreamInput in) throws IOException { + return readDiffFrom(View::new, in); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + View that = (View) o; + return name.equals(that.name) + && description.equals(that.description) + && createdAt == that.createdAt + && modifiedAt == that.modifiedAt + && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, createdAt, modifiedAt, targets); + } + + /** The source of data used to project the view */ + @ExperimentalApi + public static class Target implements Writeable, ToXContentObject, Comparable<Target> { + + private final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = Objects.requireNonNull(indexPattern, "IndexPattern is required"); + } + + public Target(final StreamInput in) throws IOException { + this(in.readString()); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + public static final ParseField INDEX_PATTERN_FIELD = new 
ParseField("indexPattern"); + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_PATTERN_FIELD.getPreferredName(), indexPattern); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + @Override + public int compareTo(final Target o) { + if (this == o) return 0; + + final Target other = (Target) o; + return this.indexPattern.compareTo(other.indexPattern); + } + } + + public static final ParseField NAME_FIELD = new ParseField("name"); + public static final ParseField DESCRIPTION_FIELD = new ParseField("description"); + public static final ParseField CREATED_AT_FIELD = new ParseField("createdAt"); + public static final ParseField MODIFIED_AT_FIELD = new ParseField("modifiedAt"); + public static final ParseField TARGETS_FIELD = new ParseField("targets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<View, Void> PARSER = new ConstructingObjectParser<>( + "view", + args -> new View((String) args[0], (String) args[1], (Long) args[2], (Long) args[3], new TreeSet<>((List<Target>) args[4])) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DESCRIPTION_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, CREATED_AT_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, MODIFIED_AT_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), TARGETS_FIELD); + } + + public static View fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + builder.field(CREATED_AT_FIELD.getPreferredName(), createdAt); + builder.field(MODIFIED_AT_FIELD.getPreferredName(), modifiedAt); + builder.field(TARGETS_FIELD.getPreferredName(), targets); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeZLong(createdAt); + out.writeZLong(modifiedAt); + out.writeList(targets.stream().collect(Collectors.toList())); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java new file mode 100644 index 0000000000000..a89068078be58 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the 
Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.Version; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.cluster.metadata.ComposableIndexTemplateMetadata.MINIMMAL_SUPPORTED_VERSION; + +/** View metadata */ +@ExperimentalApi +public class ViewMetadata implements Metadata.Custom { + + public static final String TYPE = "view"; + private static final ParseField VIEW_FIELD = new ParseField("view"); + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<ViewMetadata, Void> PARSER = new ConstructingObjectParser<>( + TYPE, + false, + a -> new ViewMetadata((Map<String, View>) a[0]) + ); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> { + Map<String, View> views = new HashMap<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + views.put(p.currentName(), View.fromXContent(p)); + } + return views; + }, VIEW_FIELD); + } + + private final Map<String, View> views; + + public ViewMetadata(final Map<String, View> views) { + this.views = views; + } + + public ViewMetadata(final StreamInput in) throws IOException { + this.views = in.readMap(StreamInput::readString, View::new); + } + + public Map<String, View> views() { + return this.views; + } + + @Override + public Diff<Metadata.Custom> diff(final Metadata.Custom before) { + return new ViewMetadata.ViewMetadataDiff((ViewMetadata) before, this); + } + + public static NamedDiff<Metadata.Custom> readDiffFrom(final StreamInput in) throws IOException { + return new ViewMetadata.ViewMetadataDiff(in); + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return MINIMMAL_SUPPORTED_VERSION; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeMap(this.views, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + } + + public static ViewMetadata fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(VIEW_FIELD.getPreferredName()); + for (Map.Entry<String, View> entry : views.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + return builder; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int hashCode() { + return Objects.hash(this.views); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) 
{ + return false; + } + ViewMetadata other = (ViewMetadata) obj; + return Objects.equals(this.views, other.views); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + /** + * Builder of view metadata. + */ + @ExperimentalApi + public static class Builder { + + private final Map<String, View> views = new HashMap<>(); + + public Builder putDataStream(final View view) { + views.put(view.getName(), view); + return this; + } + + public ViewMetadata build() { + return new ViewMetadata(views); + } + } + + /** + * A diff between view metadata. + */ + static class ViewMetadataDiff implements NamedDiff<Metadata.Custom> { + + final Diff<Map<String, View>> dataStreamDiff; + + ViewMetadataDiff(ViewMetadata before, ViewMetadata after) { + this.dataStreamDiff = DiffableUtils.diff(before.views, after.views, DiffableUtils.getStringKeySerializer()); + } + + ViewMetadataDiff(StreamInput in) throws IOException { + this.dataStreamDiff = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), View::new, View::readDiffFrom); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new ViewMetadata(dataStreamDiff.apply(((ViewMetadata) part).views)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + dataStreamDiff.writeTo(out); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index bc24dd22f5c6e..b303c3a2034d5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -26,6 +26,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.Map; +import java.util.Objects; /** * Contains metadata for weighted routing @@ -99,7 +100,7 @@ public static NamedDiff<Metadata.Custom> readDiffFrom(StreamInput in) throws IOE public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws IOException { String attrKey = null; Double attrValue; - String attributeName = null; + String attributeName = ""; Map<String, Double> weights = new HashMap<>(); WeightedRouting weightedRouting; XContentParser.Token token; @@ -162,12 +163,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WeightedRoutingMetadata that = (WeightedRoutingMetadata) o; - return weightedRouting.equals(that.weightedRouting); + return weightedRouting.equals(that.weightedRouting) && version == that.version; } @Override public int hashCode() { - return weightedRouting.hashCode(); + return Objects.hash(weightedRouting.hashCode(), version); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index a406552f854da..938a603c459c9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -729,23 +729,6 @@ assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == + " was matched but wasn't removed"; } - public void swapPrimaryWithReplica( - Logger logger, - ShardRouting primaryShard, - ShardRouting replicaShard, - RoutingChangesObserver changes - ) { - assert 
primaryShard.primary() : "Invalid primary shard provided"; - assert !replicaShard.primary() : "Invalid Replica shard provided"; - - ShardRouting newPrimary = primaryShard.moveActivePrimaryToReplica(); - ShardRouting newReplica = replicaShard.moveActiveReplicaToPrimary(); - updateAssigned(primaryShard, newPrimary); - updateAssigned(replicaShard, newReplica); - logger.info("Swap relocation performed for shard [{}]", newPrimary.shortSummary()); - changes.replicaPromoted(newPrimary); - } - private void unassignPrimaryAndPromoteActiveReplicaIfExists( ShardRouting failedShard, UnassignedInfo unassignedInfo, diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 7f2382f8b4910..e4095a84be081 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -307,6 +307,16 @@ public ShardsIterator allShardsSatisfyingPredicate(Predicate<ShardRouting> predi return allShardsSatisfyingPredicate(indices, predicate, false); } + /** + * All the shards for the provided indices which match the predicate + * @param indices indices to return all the shards. + * @param predicate condition to match + * @return iterator over shards matching the predicate for the specific indices + */ + public ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate<ShardRouting> predicate) { + return allShardsSatisfyingPredicate(indices, predicate, false); + } + private ShardsIterator allShardsSatisfyingPredicate( String[] indices, Predicate<ShardRouting> predicate, diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java index 2b93a1483b801..468fac08d2946 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -54,6 +54,7 @@ public boolean isSet() { @Override public void writeTo(StreamOutput out) throws IOException { + out.writeString(attributeName); out.writeGenericValue(weights); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index 8a14ce3f1a288..a05938c176678 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -43,6 +43,8 @@ public final class RemoteShardsBalancer extends ShardsBalancer { private final Logger logger; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; + // indicates if there are any nodes being throttled for allocating any unassigned shards + private boolean anyNodesThrottled = false; public RemoteShardsBalancer(Logger logger, RoutingAllocation allocation) { this.logger = logger; @@ -84,24 +86,39 @@ void moveShards() { Queue<RoutingNode> excludedNodes = new ArrayDeque<>(); classifyNodesForShardMovement(eligibleNodes, excludedNodes); - if (excludedNodes.isEmpty()) { - logger.debug("No excluded nodes found. 
Returning..."); - return; - } - - while (!eligibleNodes.isEmpty() && !excludedNodes.isEmpty()) { - RoutingNode sourceNode = excludedNodes.poll(); - for (ShardRouting ineligibleShard : sourceNode) { - if (ineligibleShard.started() == false) { + // move shards that cannot remain on eligible nodes + final List<ShardRouting> forceMoveShards = new ArrayList<>(); + eligibleNodes.forEach(sourceNode -> { + for (final ShardRouting shardRouting : sourceNode) { + if (ineligibleForMove(shardRouting)) { continue; } - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(ineligibleShard, allocation))) { + if (allocation.deciders().canRemain(shardRouting, sourceNode, allocation) == Decision.NO) { + forceMoveShards.add(shardRouting); + } + } + }); + for (final ShardRouting shard : forceMoveShards) { + if (eligibleNodes.isEmpty()) { + logger.trace("there are no eligible nodes available, return"); + return; + } + + tryShardMovementToEligibleNode(eligibleNodes, shard); + } + + // move shards that are currently assigned on excluded nodes + while (eligibleNodes.isEmpty() == false && excludedNodes.isEmpty() == false) { + RoutingNode sourceNode = excludedNodes.poll(); + for (final ShardRouting ineligibleShard : sourceNode) { + if (ineligibleForMove(ineligibleShard)) { continue; } if (eligibleNodes.isEmpty()) { - break; + logger.trace("there are no eligible nodes available, return"); + return; } tryShardMovementToEligibleNode(eligibleNodes, ineligibleShard); @@ -109,6 +126,10 @@ void moveShards() { } } + private boolean ineligibleForMove(ShardRouting shard) { + return shard.started() == false || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false; + } + /** * Classifies the nodes into eligible and excluded depending on whether node is able or unable for shard assignment * @param eligibleNodes contains the list of classified nodes eligible to accept shards @@ -145,10 +166,23 @@ private void classifyNodesForShardMovement(Queue<RoutingNode> eligibleNodes, Que * @param shard the ineligible shard to be moved */ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, ShardRouting shard) { - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!eligibleNodes.isEmpty()) { - RoutingNode targetNode = eligibleNodes.poll(); - Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); + final Set<String> nodesCheckedForShard = new HashSet<>(); + int numNodesToCheck = eligibleNodes.size(); + while (eligibleNodes.isEmpty() == false) { + assert numNodesToCheck > 0; + final RoutingNode targetNode = eligibleNodes.poll(); + --numNodesToCheck; + // skip the node that the target shard is currently allocated on + if (targetNode.nodeId().equals(shard.currentNodeId())) { + assert nodesCheckedForShard.add(targetNode.nodeId()); + eligibleNodes.offer(targetNode); + if (numNodesToCheck == 0) { + return; + } + continue; + } + + final Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); if (currentShardDecision.type() == Decision.Type.YES) { if (logger.isDebugEnabled()) { @@ -166,7 +200,7 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh allocation.changes() ); eligibleNodes.offer(targetNode); - break; + return; } else { if (logger.isTraceEnabled()) { logger.trace( @@ -177,18 +211,19 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh ); } - Decision nodeLevelDecision = 
allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); + final Decision nodeLevelDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); if (nodeLevelDecision.type() == Decision.Type.YES) { logger.debug("Node: [{}] can still accept shards. Adding it back to the queue.", targetNode.nodeId()); eligibleNodes.offer(targetNode); - nodesCheckedForShard.add(targetNode.nodeId()); + assert nodesCheckedForShard.add(targetNode.nodeId()); } else { logger.debug("Node: [{}] cannot accept any more shards. Removing it from queue.", targetNode.nodeId()); } - // Break out if all nodes in the queue have been checked for this shard - if (eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { - break; + // Break out if all eligible nodes have been examined + if (numNodesToCheck == 0) { + assert eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId())); + return; } } } @@ -225,7 +260,7 @@ void balance() { } } - while (!sourceNodes.isEmpty() && !targetNodes.isEmpty()) { + while (sourceNodes.isEmpty() == false && targetNodes.isEmpty() == false) { RoutingNode sourceNode = sourceNodes.poll(); tryRebalanceNode(sourceNode, targetNodes, avgPrimaryPerNode, nodePrimaryShardCount); } @@ -275,11 +310,11 @@ public Map<String, UnassignedIndexShards> groupUnassignedShardsByIndex() { HashMap<String, UnassignedIndexShards> unassignedShardMap = new HashMap<>(); for (ShardRouting shard : routingNodes.unassigned().drain()) { String index = shard.getIndexName(); - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { routingNodes.unassigned().add(shard); continue; } - if (!unassignedShardMap.containsKey(index)) { + if (unassignedShardMap.containsKey(index) == false) { unassignedShardMap.put(index, new UnassignedIndexShards()); } unassignedShardMap.get(index).addShard(shard); @@ -296,13 +331,15 @@ private void unassignIgnoredRemoteShards(RoutingAllocation routingAllocation) { RoutingNodes.UnassignedShards unassignedShards = routingAllocation.routingNodes().unassigned(); for (ShardRouting shard : unassignedShards.drainIgnored()) { RoutingPool pool = RoutingPool.getShardPool(shard, routingAllocation); - if (pool == RoutingPool.REMOTE_CAPABLE && shard.unassigned() && (shard.primary() || !shard.unassignedInfo().isDelayed())) { + if (pool == RoutingPool.REMOTE_CAPABLE + && shard.unassigned() + && (shard.primary() || shard.unassignedInfo().isDelayed() == false)) { ShardRouting unassignedShard = shard; // Shard when moved to an unassigned state updates the recovery source to be ExistingStoreRecoverySource // Remote shards do not have an existing store to recover from and can be recovered from an empty source // to re-fetch any shard blocks from the repository. 
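The queue-scanning loops above (tryShardMovementToEligibleNode here, and tryAllocateUnassignedShard further down) replace the set-based "has every queued node been checked" test with a countdown seeded from the queue size. A minimal JDK-only sketch of the pattern; all names here are hypothetical, not the balancer's actual API:

```java
import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import java.util.function.Predicate;

public class NodeQueueScanDemo {
    // Polls each node at most once; the countdown guarantees termination
    // without re-scanning the whole queue on every iteration.
    static String findTarget(Queue<String> nodes, Predicate<String> canAccept) {
        int numNodesToCheck = nodes.size();
        while (!nodes.isEmpty() && numNodesToCheck-- > 0) {
            String node = nodes.poll();
            if (canAccept.test(node)) {
                nodes.offer(node); // keep it eligible for subsequent shards
                return node;
            }
            // otherwise the node is dropped from the queue entirely
        }
        return null; // every queued node was examined exactly once
    }

    public static void main(String[] args) {
        Queue<String> nodes = new ArrayDeque<>(List.of("node-1", "node-2", "node-3"));
        System.out.println(findTarget(nodes, "node-2"::equals)); // node-2
    }
}
```

The countdown keeps each pass linear in the number of eligible nodes, where the old stream-based "all checked" test re-walked the queue for every polled node.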
if (shard.primary()) { - if (!RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType())) { + if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false) { unassignedShard = shard.updateUnassigned(shard.unassignedInfo(), RecoverySource.EmptyStoreRecoverySource.INSTANCE); } } @@ -323,12 +360,16 @@ private void allocateUnassignedReplicas(Queue<RoutingNode> nodeQueue, Map<String } private void ignoreRemainingShards(Map<String, UnassignedIndexShards> unassignedShardMap) { + // If any nodes are throttled during allocation, mark all remaining unassigned shards as THROTTLED + final UnassignedInfo.AllocationStatus status = anyNodesThrottled + ? UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED + : UnassignedInfo.AllocationStatus.DECIDERS_NO; for (UnassignedIndexShards indexShards : unassignedShardMap.values()) { for (ShardRouting shard : indexShards.getPrimaries()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } for (ShardRouting shard : indexShards.getReplicas()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } } } @@ -353,7 +394,7 @@ private void allocateUnassignedShards( } logger.debug("Allocating shards for index: [{}]", index); - while (!shardsToAllocate.isEmpty() && !nodeQueue.isEmpty()) { + while (shardsToAllocate.isEmpty() == false && nodeQueue.isEmpty() == false) { ShardRouting shard = shardsToAllocate.poll(); if (shard.assignedToNode()) { if (logger.isDebugEnabled()) { @@ -389,11 +430,11 @@ private void allocateUnassignedShards( private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouting shard) { boolean allocated = false; boolean throttled = false; - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!nodeQueue.isEmpty()) { + int numNodesToCheck = nodeQueue.size(); + while (nodeQueue.isEmpty() == false) { RoutingNode node = nodeQueue.poll(); + --numNodesToCheck; Decision allocateDecision = allocation.deciders().canAllocate(shard, node, allocation); - nodesCheckedForShard.add(node.nodeId()); if (allocateDecision.type() == Decision.Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shardShortSummary(shard), node.nodeId()); @@ -432,6 +473,10 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti } nodeQueue.offer(node); } else { + if (nodeLevelDecision.type() == Decision.Type.THROTTLE) { + anyNodesThrottled = true; + } + if (logger.isTraceEnabled()) { logger.trace( "Cannot allocate any shard to node: [{}]. Removing from queue. Node level decisions: [{}],[{}]", @@ -443,14 +488,14 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti } // Break out if all nodes in the queue have been checked for this shard - if (nodeQueue.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { + if (numNodesToCheck == 0) { break; } } } - if (!allocated) { - UnassignedInfo.AllocationStatus status = throttled + if (allocated == false) { + UnassignedInfo.AllocationStatus status = (throttled || anyNodesThrottled) ? 
UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED : UnassignedInfo.AllocationStatus.DECIDERS_NO; routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); @@ -470,14 +515,16 @@ private void tryRebalanceNode( // Try to relocate the valid shards on the sourceNode, one at a time; // until either sourceNode is balanced OR no more active primary shard available OR all the target nodes are exhausted - while (shardsToBalance > 0 && shardIterator.hasNext() && !targetNodes.isEmpty()) { + while (shardsToBalance > 0 && shardIterator.hasNext() && targetNodes.isEmpty() == false) { // Find an active primary shard to relocate ShardRouting shard = shardIterator.next(); - if (!shard.started() || !shard.primary() || !RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (shard.started() == false + || shard.primary() == false + || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { continue; } - while (!targetNodes.isEmpty()) { + while (targetNodes.isEmpty() == false) { // Find a valid target node that can accommodate the current shard relocation RoutingNode targetNode = targetNodes.poll(); if (primaryCount.get(targetNode.nodeId()) >= avgPrimary) { @@ -485,6 +532,10 @@ private void tryRebalanceNode( continue; } + if (targetNode.getByShardId(shard.shardId()) != null) { + continue; + } + // Try relocate the shard on the target node Decision rebalanceDecision = tryRelocateShard(shard, targetNode); @@ -522,21 +573,10 @@ private void tryRebalanceNode( } /** - * For every primary shard for which this method is invoked, - * swap is attempted with the destination node in case replica shard is present. - * In case replica is not present, relocation of the shard id performed. + * For every primary shard for which this method is invoked, relocation of the shard is performed. */ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNode) { - // Check if there is already a replica for the shard on the destination node. - // Then we can directly swap the replica with the primary shards. - // Invariant: We only allow swap relocation on remote shards. 
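With the swap path removed (deleted hunk continuing below), a rebalance target must not already hold a copy of the shard; the new getByShardId check above skips such nodes outright. A toy model of that rule, using plain JDK types and hypothetical data:

```java
import java.util.Map;
import java.util.Set;

public class RebalanceSkipDemo {
    // nodeId -> shard ids hosted on that node (hypothetical data)
    static boolean canRelocate(String shardId, String targetNodeId, Map<String, Set<String>> hosted) {
        // mirrors targetNode.getByShardId(shard.shardId()) != null: a node holds at
        // most one copy of a shard, so such targets are now skipped instead of swapped
        return !hosted.getOrDefault(targetNodeId, Set.of()).contains(shardId);
    }

    public static void main(String[] args) {
        Map<String, Set<String>> hosted = Map.of("n1", Set.of("s1"), "n2", Set.of());
        System.out.println(canRelocate("s1", "n1", hosted)); // false: a copy is already there
        System.out.println(canRelocate("s1", "n2", hosted)); // true
    }
}
```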
- ShardRouting replicaShard = destinationNode.getByShardId(shard.shardId()); - if (replicaShard != null) { - assert !replicaShard.primary() : "Primary Shard found while expected Replica during shard rebalance"; - return executeSwapShard(shard, replicaShard, allocation); - } - - // Since no replica present on the destinationNode; try relocating the shard to the destination node + assert destinationNode.getByShardId(shard.shardId()) == null; Decision allocationDecision = allocation.deciders().canAllocate(shard, destinationNode, allocation); Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation); logger.trace( @@ -566,15 +606,6 @@ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNod return Decision.NO; } - private Decision executeSwapShard(ShardRouting primaryShard, ShardRouting replicaShard, RoutingAllocation allocation) { - if (!replicaShard.started()) { - return new Decision.Single(Decision.Type.NO); - } - - allocation.routingNodes().swapPrimaryWithReplica(logger, primaryShard, replicaShard, allocation.changes()); - return new Decision.Single(Decision.Type.YES); - } - private void failUnattemptedShards() { RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java new file mode 100644 index 0000000000000..27ebe5390ea6d --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.cluster.routing.allocation.decider; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction; + +import java.util.Locale; + +/** + * A new allocation decider for migration of document replication clusters to remote store backed clusters: + * - For STRICT compatibility mode, the decision is always YES + * - For remote store backed indices, allocation or relocation can only be towards a remote node + * - For "REMOTE_STORE" migration direction: + * - New primary shards can only be allocated to a remote node + * - New replica shards can be allocated to a remote node iff the primary has been migrated/allocated to a remote node + * - For other directions ("DOCREP", "NONE"), the decision is always YES + * + * @opensearch.internal + */ +public class RemoteStoreMigrationAllocationDecider extends AllocationDecider { + + public static final String NAME = "remote_store_migration"; + + private Direction migrationDirection; + private CompatibilityMode compatibilityMode; + private boolean remoteStoreBackedIndex; + + public RemoteStoreMigrationAllocationDecider(Settings settings, ClusterSettings clusterSettings) { + this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings); + this.compatibilityMode = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, this::setMigrationDirection); + clusterSettings.addSettingsUpdateConsumer( + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + this::setCompatibilityMode + ); + } + + private void setMigrationDirection(Direction migrationDirection) { + this.migrationDirection = migrationDirection; + } + + private void setCompatibilityMode(CompatibilityMode compatibilityMode) { + this.compatibilityMode = compatibilityMode; + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + DiscoveryNode targetNode = node.node(); + + if (compatibilityMode.equals(CompatibilityMode.STRICT)) { + // assuming all nodes are of the same type (all remote or all non-remote) + return allocation.decision( + Decision.YES, + NAME, + getDecisionDetails(true, shardRouting, targetNode, " for strict compatibility mode") + ); + } + + if (migrationDirection.equals(Direction.REMOTE_STORE) == false) { + // docrep migration direction is currently not supported + return allocation.decision( + Decision.YES, + NAME, + getDecisionDetails(true, shardRouting, targetNode, " for non remote_store direction") + ); + } + + // check for remote store backed indices + IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); + if (IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.exists(indexMetadata.getSettings())) { + remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings()); + } + if (remoteStoreBackedIndex && 
targetNode.isRemoteStoreNode() == false) { + // allocations and relocations must be to a remote node + String reason = String.format( + Locale.ROOT, + " because a remote store backed index's shard copy can only be %s to a remote node", + ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated") + ); + return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, shardRouting, targetNode, reason)); + } + + if (shardRouting.primary()) { + return primaryShardDecision(shardRouting, targetNode, allocation); + } + return replicaShardDecision(shardRouting, targetNode, allocation); + } + + // handle scenarios for allocation of a new shard's primary copy + private Decision primaryShardDecision(ShardRouting primaryShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) { + if (targetNode.isRemoteStoreNode() == false) { + return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, primaryShardRouting, targetNode, "")); + } + return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, "")); + } + + private Decision replicaShardDecision(ShardRouting replicaShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) { + if (targetNode.isRemoteStoreNode()) { + ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(replicaShardRouting.shardId()); + boolean primaryHasMigratedToRemote = false; + if (primaryShardRouting != null) { + DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId()); + primaryHasMigratedToRemote = primaryShardNode.isRemoteStoreNode(); + } + if (primaryHasMigratedToRemote == false) { + return allocation.decision( + Decision.NO, + NAME, + getDecisionDetails(false, replicaShardRouting, targetNode, " since primary shard copy is not yet migrated to remote") + ); + } + return allocation.decision( + Decision.YES, + NAME, + getDecisionDetails(true, replicaShardRouting, targetNode, " since primary shard copy has been migrated to remote") + ); + } + return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, replicaShardRouting, targetNode, "")); + } + + // get detailed reason for the decision + private String getDecisionDetails(boolean isYes, ShardRouting shardRouting, DiscoveryNode targetNode, String reason) { + return String.format( + Locale.ROOT, + "[%s migration_direction]: %s shard copy %s be %s to a %s node%s", + migrationDirection.direction, + (shardRouting.primary() ? "primary" : "replica"), + (isYes ? "can" : "can not"), + ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated"), + (targetNode.isRemoteStoreNode() ? "remote" : "non-remote"), + reason + ); + } + +} diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java new file mode 100644 index 0000000000000..c5aeb7cd1fa40 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Cache types available within OpenSearch. 
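The decider's rules reduce to a small decision table. The following standalone sketch models them with booleans; it is a simplified illustration, not the decider's real signature or types:

```java
// Simplified model of the migration decider's canAllocate rules, reduced to
// booleans. Enum and method names here are hypothetical stand-ins.
enum Mode { STRICT, MIXED }

enum Dir { REMOTE_STORE, DOCREP, NONE }

public class MigrationDeciderModel {
    static boolean canAllocate(Mode mode, Dir dir, boolean remoteBackedIndex,
                               boolean targetIsRemote, boolean isPrimary, boolean primaryOnRemote) {
        if (mode == Mode.STRICT) return true;                    // homogeneous cluster assumed
        if (dir != Dir.REMOTE_STORE) return true;                // other directions are unrestricted
        if (remoteBackedIndex && !targetIsRemote) return false;  // remote-backed index stays on remote nodes
        if (isPrimary) return targetIsRemote;                    // new primaries migrate first
        return !targetIsRemote || primaryOnRemote;               // replica follows its primary to remote
    }

    public static void main(String[] args) {
        // A replica cannot reach a remote node before its primary has:
        System.out.println(canAllocate(Mode.MIXED, Dir.REMOTE_STORE, false, true, false, false)); // false
        System.out.println(canAllocate(Mode.MIXED, Dir.REMOTE_STORE, false, true, false, true));  // true
    }
}
```

The last rule is the one replicaShardDecision above enforces: a replica may land on a remote node only after its active primary is already on one.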
+ */ +@ExperimentalApi +public enum CacheType { + INDICES_REQUEST_CACHE("indices.requests.cache"); + + private final String settingPrefix; + + CacheType(String settingPrefix) { + this.settingPrefix = settingPrefix; + } + + public String getSettingPrefix() { + return settingPrefix; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java index c6ea5fca1a8fe..f7be46a852631 100644 --- a/server/src/main/java/org/opensearch/common/cache/ICache.java +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -8,6 +8,12 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.store.config.CacheConfig; + +import java.io.Closeable; +import java.util.Map; + /** * Represents a cache interface. * @param <K> Type of key. @@ -15,7 +21,8 @@ * * @opensearch.experimental */ -public interface ICache<K, V> { +@ExperimentalApi +public interface ICache<K, V> extends Closeable { V get(K key); void put(K key, V value); @@ -31,4 +38,14 @@ public interface ICache<K, V> { long count(); void refresh(); + + /** + * Factory to create objects. + */ + @ExperimentalApi + interface Factory { + <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories); + + String getCacheName(); + } } diff --git a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java index 57aa4aa39c782..aafd46560021b 100644 --- a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java +++ b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java @@ -8,13 +8,16 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Extends a cache loader with awareness of whether the data is loaded or not. * @param <K> Type of key. * @param <V> Type of value. * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface LoadAwareCacheLoader<K, V> extends CacheLoader<K, V> { boolean isLoaded(); } diff --git a/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java new file mode 100644 index 0000000000000..832a65b573aec --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.service.CacheService; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Holds all the cache factories and provides a way to fetch them when needed. 
+ */ +@ExperimentalApi +public class CacheModule { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + + private final CacheService cacheService; + private final Settings settings; + + public CacheModule(List<CachePlugin> cachePlugins, Settings settings) { + this.cacheStoreTypeFactories = getCacheStoreTypeFactories(cachePlugins); + this.settings = settings; + this.cacheService = new CacheService(cacheStoreTypeFactories, settings); + } + + private static Map<String, ICache.Factory> getCacheStoreTypeFactories(List<CachePlugin> cachePlugins) { + Map<String, ICache.Factory> cacheStoreTypeFactories = new HashMap<>(); + // Add the core OpenSearchOnHeapCache as well. + cacheStoreTypeFactories.put( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory() + ); + for (CachePlugin cachePlugin : cachePlugins) { + Map<String, ICache.Factory> factoryMap = cachePlugin.getCacheFactoryMap(); + for (Map.Entry<String, ICache.Factory> entry : factoryMap.entrySet()) { + if (cacheStoreTypeFactories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Cache name: " + entry.getKey() + " is " + "already registered"); + } + } + } + return Collections.unmodifiableMap(cacheStoreTypeFactories); + } + + public CacheService getCacheService() { + return this.cacheService; + } + + // Package private for testing. + Map<String, ICache.Factory> getCacheStoreTypeFactories() { + return cacheStoreTypeFactories; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/module/package-info.java b/server/src/main/java/org/opensearch/common/cache/module/package-info.java new file mode 100644 index 0000000000000..95ed25ca21643 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache providers. */ +package org.opensearch.common.cache.module; diff --git a/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java new file mode 100644 index 0000000000000..0a98542a05bb7 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.policy; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.query.QuerySearchResult; + +import java.io.IOException; + +/** + * A class containing a QuerySearchResult used in a cache, as well as information needed for all cache policies + * to decide whether to admit a given BytesReference. 
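CacheModule above merges the built-in on-heap factory with factories contributed by CachePlugin implementations and rejects duplicate names. The same merge rule, sketched with plain strings standing in for ICache.Factory instances (illustrative only):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class FactoryRegistryDemo {
    // Plain strings stand in for ICache.Factory instances.
    static Map<String, String> register(Map<String, String> builtIn, List<Map<String, String>> plugins) {
        Map<String, String> all = new HashMap<>(builtIn);
        for (Map<String, String> contributed : plugins) {
            for (Map.Entry<String, String> e : contributed.entrySet()) {
                if (all.put(e.getKey(), e.getValue()) != null) {
                    throw new IllegalArgumentException("Cache name: " + e.getKey() + " is already registered");
                }
            }
        }
        return Map.copyOf(all); // registry is immutable once built
    }

    public static void main(String[] args) {
        Map<String, String> merged = register(
            Map.of("opensearch_onheap", "core"),
            List.of(Map.of("example_disk_cache", "from-plugin")) // hypothetical plugin factory name
        );
        System.out.println(merged.keySet());
    }
}
```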
Also handles serialization/deserialization of the underlying QuerySearchResult, + * which is all that is needed outside the cache. At policy checking time, this spares us from having to create an entire + * short-lived QuerySearchResult object just to read a few values. + * @opensearch.internal + */ +public class CachedQueryResult { + private final PolicyValues policyValues; + private final QuerySearchResult qsr; + + public CachedQueryResult(QuerySearchResult qsr, long tookTimeNanos) { + this.qsr = qsr; + this.policyValues = new PolicyValues(tookTimeNanos); + } + + // Retrieve only took time from a serialized CQR, without creating a short-lived QuerySearchResult or CachedQueryResult object. + public static PolicyValues getPolicyValues(BytesReference serializedCQR) throws IOException { + StreamInput in = serializedCQR.streamInput(); + return new PolicyValues(in); + } + + // Retrieve only the QSR from a serialized CQR, and load it into an existing QSR object discarding the took time which isn't needed + // outside the cache + public static void loadQSR( + BytesReference serializedCQR, + QuerySearchResult qsr, + ShardSearchContextId id, + NamedWriteableRegistry registry + ) throws IOException { + StreamInput in = new NamedWriteableAwareStreamInput(serializedCQR.streamInput(), registry); + PolicyValues pv = new PolicyValues(in); // Read and discard PolicyValues + qsr.readFromWithId(id, in); + } + + public void writeToNoId(StreamOutput out) throws IOException { + policyValues.writeTo(out); + qsr.writeToNoId(out); + } + + /** + * A class containing information needed for all cache policies + * to decide whether to admit a given value. + */ + public static class PolicyValues implements Writeable { + final long tookTimeNanos; + // More values can be added here as they're needed for future policies + + public PolicyValues(long tookTimeNanos) { + this.tookTimeNanos = tookTimeNanos; + } + + public PolicyValues(StreamInput in) throws IOException { + this.tookTimeNanos = in.readZLong(); + } + + public long getTookTimeNanos() { + return tookTimeNanos; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeZLong(tookTimeNanos); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/policy/package-info.java b/server/src/main/java/org/opensearch/common/cache/policy/package-info.java new file mode 100644 index 0000000000000..ce9c2f62d7da2 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/policy/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** A package for policies controlling what can enter caches. */ +package org.opensearch.common.cache.policy; diff --git a/server/src/main/java/org/opensearch/common/cache/serializer/BytesReferenceSerializer.java b/server/src/main/java/org/opensearch/common/cache/serializer/BytesReferenceSerializer.java new file mode 100644 index 0000000000000..c26e1191888df --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/serializer/BytesReferenceSerializer.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
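The PolicyValues-first layout is the key trick: the took-time header is written before the serialized QuerySearchResult, so admission policies can read the header and stop. A self-contained sketch of the idea (the real code uses OpenSearch stream types and writeZLong; this uses a fixed-width long for simplicity):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class HeaderThenPayloadDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bos)) {
            out.writeLong(42_000_000L);                      // header: took-time in nanos
            out.writeUTF("<serialized QuerySearchResult>");  // payload stand-in
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            long tookTimeNanos = in.readLong();              // policy check reads the header only
            System.out.println("took " + tookTimeNanos + " ns; payload left untouched");
        }
    }
}
```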
+ */ + +package org.opensearch.common.cache.serializer; + +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; + +import java.util.Arrays; + +/** + * A serializer which transforms BytesReference to byte[]. + * The type of BytesReference is NOT preserved after deserialization, but nothing in opensearch should care. + */ +public class BytesReferenceSerializer implements Serializer<BytesReference, byte[]> { + // This class does not get passed to ehcache itself, so it's not required that classes match after deserialization. + + public BytesReferenceSerializer() {} + + @Override + public byte[] serialize(BytesReference object) { + return BytesReference.toBytesWithoutCompact(object); + } + + @Override + public BytesReference deserialize(byte[] bytes) { + if (bytes == null) { + return null; + } + return new BytesArray(bytes); + } + + @Override + public boolean equals(BytesReference object, byte[] bytes) { + return Arrays.equals(serialize(object), bytes); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java new file mode 100644 index 0000000000000..35e28707d1ca3 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.serializer; + +/** + * Defines an interface for serializers, to be used by pluggable caches. + * T is the class of the original object, and U is the serialized class. + */ +public interface Serializer<T, U> { + /** + * Serializes an object. + * @param object A non-serialized object. + * @return The serialized representation of the object. + */ + U serialize(T object); + + /** + * Deserializes bytes into an object. + * @param bytes The serialized representation. + * @return The original object. + */ + T deserialize(U bytes); + + /** + * Compares an object to a serialized representation of an object. + * @param object A non-serialized object + * @param bytes Serialized representation of an object + * @return true if representing the same object, false if not + */ + boolean equals(T object, U bytes); +} diff --git a/server/src/main/java/org/opensearch/common/cache/serializer/package-info.java b/server/src/main/java/org/opensearch/common/cache/serializer/package-info.java new file mode 100644 index 0000000000000..e66a9aa4cf68c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/serializer/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** A package for serializers used in caches. 
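As a concrete instance of the Serializer contract just defined, a UTF-8 string serializer might look like this (an illustrative example, not part of the change):

```java
import org.opensearch.common.cache.serializer.Serializer;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Hypothetical example implementation of the Serializer interface above.
public class StringSerializer implements Serializer<String, byte[]> {
    @Override
    public byte[] serialize(String object) {
        return object == null ? null : object.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(byte[] bytes) {
        return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
    }

    @Override
    public boolean equals(String object, byte[] bytes) {
        // Compare in serialized form, mirroring BytesReferenceSerializer above.
        return Arrays.equals(serialize(object), bytes);
    }
}
```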
*/ +package org.opensearch.common.cache.serializer; diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java new file mode 100644 index 0000000000000..b6710e5e4b424 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; + +import java.util.HashMap; +import java.util.Map; + +/** + * Service responsible for creating caches. + */ +@ExperimentalApi +public class CacheService { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + private final Settings settings; + private Map<CacheType, ICache<?, ?>> cacheTypeMap; + + public CacheService(Map<String, ICache.Factory> cacheStoreTypeFactories, Settings settings) { + this.cacheStoreTypeFactories = cacheStoreTypeFactories; + this.settings = settings; + this.cacheTypeMap = new HashMap<>(); + } + + public Map<CacheType, ICache<?, ?>> getCacheTypeMap() { + return this.cacheTypeMap; + } + + public <K, V> ICache<K, V> createCache(CacheConfig<K, V> config, CacheType cacheType) { + Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String storeName = cacheSettingForCacheType.get(settings); + if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + // Condition 1: In case feature flag is off, we default to onHeap. + // Condition 2: In case storeName is not explicitly mentioned, we assume user is looking to use older + // settings, so we again fallback to onHeap to maintain backward compatibility. + // It is guaranteed that we will have this store name registered, so + // should be safe. + storeName = OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME; + } + if (!cacheStoreTypeFactories.containsKey(storeName)) { + throw new IllegalArgumentException("No store name: [" + storeName + "] is registered for cache type: " + cacheType); + } + ICache.Factory factory = cacheStoreTypeFactories.get(storeName); + ICache<K, V> iCache = factory.create(config, cacheType, cacheStoreTypeFactories); + cacheTypeMap.put(cacheType, iCache); + return iCache; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/service/package-info.java b/server/src/main/java/org/opensearch/common/cache/service/package-info.java new file mode 100644 index 0000000000000..5fb87f7613627 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
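createCache above resolves the store name with a two-condition fallback. Reduced to a pure function over stand-in parameters (illustrative only, not the service's real API):

```java
import java.util.Map;

public class StoreNameResolutionDemo {
    // Stand-ins for the feature flag and the configured setting value.
    static String resolveStoreName(boolean pluggableCacheEnabled, String configuredName) {
        if (!pluggableCacheEnabled || configuredName == null || configuredName.isBlank()) {
            return "opensearch_onheap"; // fallback keeps pre-pluggable behavior
        }
        return configuredName;
    }

    public static void main(String[] args) {
        Map<String, Object> factories = Map.of("opensearch_onheap", new Object());
        String name = resolveStoreName(false, "example_disk_cache"); // flag off -> on-heap
        if (!factories.containsKey(name)) {
            throw new IllegalArgumentException("No store name: [" + name + "] is registered");
        }
        System.out.println(name);
    }
}
```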
+ */ +/** Package related to cache service **/ +package org.opensearch.common.cache.service; diff --git a/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java new file mode 100644 index 0000000000000..43a047f0f22c6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.settings; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; + +/** + * Settings related to cache. + */ +@ExperimentalApi +public class CacheSettings { + + /** + * Used to store cache store name for desired cache types within OpenSearch. + * Setting pattern: {cache_type}.store.name + * Example: indices.requests.cache.store.name + */ + public static final Setting.AffixSetting<String> CACHE_TYPE_STORE_NAME = Setting.suffixKeySetting( + "store.name", + (key) -> Setting.simpleString(key, "", Setting.Property.NodeScope) + ); + + public static Setting<String> getConcreteStoreNameSettingForCacheType(CacheType cacheType) { + return CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java new file mode 100644 index 0000000000000..7fa82021c5557 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
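Given the affix pattern above, the concrete key is just the cache type's prefix plus .store.name. A trivial sketch of the expansion, using the prefix CacheType declares:

```java
public class CacheSettingKeyDemo {
    static String storeNameKey(String settingPrefix) {
        return settingPrefix + ".store.name"; // the suffix the AffixSetting appends
    }

    public static void main(String[] args) {
        // CacheType.INDICES_REQUEST_CACHE carries the prefix "indices.requests.cache"
        System.out.println(storeNameKey("indices.requests.cache")); // indices.requests.cache.store.name
    }
}
```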
+ */ + +/** Base package for cache settings */ +package org.opensearch.common.cache.settings; diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index c497c8dbb7ea9..c9bec4ba47def 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -10,12 +10,25 @@ import org.opensearch.common.cache.Cache; import org.opensearch.common.cache.CacheBuilder; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; -import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; -import org.opensearch.common.cache.store.enums.CacheStoreType; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.Map; + +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_KEY; +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; /** * This variant of on-heap cache uses OpenSearch custom cache implementation. 
@@ -24,11 +37,10 @@ * * @opensearch.experimental */ -public class OpenSearchOnHeapCache<K, V> implements StoreAwareCache<K, V>, RemovalListener<K, V> { +public class OpenSearchOnHeapCache<K, V> implements ICache<K, V>, RemovalListener<K, V> { private final Cache<K, V> cache; - - private final StoreAwareCacheEventListener<K, V> eventListener; + private final RemovalListener<K, V> removalListener; public OpenSearchOnHeapCache(Builder<K, V> builder) { CacheBuilder<K, V> cacheBuilder = CacheBuilder.<K, V>builder() @@ -39,35 +51,23 @@ public OpenSearchOnHeapCache(Builder<K, V> builder) { cacheBuilder.setExpireAfterAccess(builder.getExpireAfterAcess()); } cache = cacheBuilder.build(); - this.eventListener = builder.getEventListener(); + this.removalListener = builder.getRemovalListener(); } @Override public V get(K key) { V value = cache.get(key); - if (value != null) { - eventListener.onHit(key, value, CacheStoreType.ON_HEAP); - } else { - eventListener.onMiss(key, CacheStoreType.ON_HEAP); - } return value; } @Override public void put(K key, V value) { cache.put(key, value); - eventListener.onCached(key, value, CacheStoreType.ON_HEAP); } @Override public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { V value = cache.computeIfAbsent(key, key1 -> loader.load(key)); - if (!loader.isLoaded()) { - eventListener.onHit(key, value, CacheStoreType.ON_HEAP); - } else { - eventListener.onMiss(key, CacheStoreType.ON_HEAP); - eventListener.onCached(key, value, CacheStoreType.ON_HEAP); - } return value; } @@ -97,20 +97,46 @@ public void refresh() { } @Override - public CacheStoreType getTierType() { - return CacheStoreType.ON_HEAP; - } + public void close() {} @Override public void onRemoval(RemovalNotification<K, V> notification) { - eventListener.onRemoval( - new StoreAwareCacheRemovalNotification<>( - notification.getKey(), - notification.getValue(), - notification.getRemovalReason(), - CacheStoreType.ON_HEAP + this.removalListener.onRemoval(notification); + } + + /** + * Factory to create OpenSearchOnheap cache. + */ + public static class OpenSearchOnHeapCacheFactory implements Factory { + + public static final String NAME = "opensearch_onheap"; + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + ICacheBuilder<K, V> builder = new Builder<K, V>().setMaximumWeightInBytes( + ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes() ) - ); + .setExpireAfterAccess(((TimeValue) settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings))) + .setWeigher(config.getWeigher()) + .setRemovalListener(config.getRemovalListener()); + Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String storeName = cacheSettingForCacheType.get(settings); + if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + // For backward compatibility as the user intent is to use older settings. 
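End to end, a consumer builds a CacheConfig and asks the factory for a cache. A hedged usage sketch against the APIs in this change; demo values are arbitrary, and with no pluggable-cache flag set, create() falls back to the config's own size and expiry as the hunk above shows:

```java
import org.opensearch.common.cache.CacheType;
import org.opensearch.common.cache.ICache;
import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
import org.opensearch.common.cache.store.config.CacheConfig;
import org.opensearch.common.settings.Settings;

import java.util.Map;

public class OnHeapCacheWiringDemo {
    public static void main(String[] args) {
        CacheConfig<String, String> config = new CacheConfig.Builder<String, String>()
            .setSettings(Settings.EMPTY)
            .setKeyType(String.class)
            .setValueType(String.class)
            .setWeigher((k, v) -> k.length() + v.length())
            .setRemovalListener(notification -> {})   // no-op listener for the demo
            .setMaxSizeInBytes(1024 * 1024)           // legacy-style size, used on fallback
            .build();

        ICache<String, String> cache = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory()
            .create(config, CacheType.INDICES_REQUEST_CACHE, Map.of());
        cache.put("k", "v");
        System.out.println(cache.get("k"));
    }
}
```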
+ builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); + builder.setExpireAfterAccess(config.getExpireAfterAccess()); + } + return builder.build(); + } + + @Override + public String getCacheName() { + return NAME; + } } /** @@ -118,10 +144,10 @@ public void onRemoval(RemovalNotification<K, V> notification) { * @param <K> Type of key * @param <V> Type of value */ - public static class Builder<K, V> extends StoreAwareCacheBuilder<K, V> { + public static class Builder<K, V> extends ICacheBuilder<K, V> { @Override - public StoreAwareCache<K, V> build() { + public ICache<K, V> build() { return new OpenSearchOnHeapCache<K, V>(this); } } diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java deleted file mode 100644 index 45ca48d94c140..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.ICache; -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Represents a cache with a specific type of store like onHeap, disk etc. - * @param <K> Type of key. - * @param <V> Type of value. - * - * @opensearch.experimental - */ -public interface StoreAwareCache<K, V> extends ICache<K, V> { - CacheStoreType getTierType(); -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java deleted file mode 100644 index 492dbff3532a1..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.RemovalNotification; -import org.opensearch.common.cache.RemovalReason; -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Removal notification for store aware cache. - * @param <K> Type of key. - * @param <V> Type of value. 
- * - * @opensearch.internal - */ -public class StoreAwareCacheRemovalNotification<K, V> extends RemovalNotification<K, V> { - private final CacheStoreType cacheStoreType; - - public StoreAwareCacheRemovalNotification(K key, V value, RemovalReason removalReason, CacheStoreType cacheStoreType) { - super(key, value, removalReason); - this.cacheStoreType = cacheStoreType; - } - - public CacheStoreType getCacheStoreType() { - return cacheStoreType; - } -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java deleted file mode 100644 index 4fbbbbfebfaa7..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store; - -import org.opensearch.common.cache.store.enums.CacheStoreType; - -/** - * Represents a store aware cache value. - * @param <V> Type of value. - * - * @opensearch.internal - */ -public class StoreAwareCacheValue<V> { - private final V value; - private final CacheStoreType source; - - public StoreAwareCacheValue(V value, CacheStoreType source) { - this.value = value; - this.source = source; - } - - public V getValue() { - return value; - } - - public CacheStoreType getCacheStoreType() { - return source; - } -} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java new file mode 100644 index 0000000000000..7ca9080ec1aa6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.builders; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.ToLongBiFunction; + +/** + * Builder for store aware cache. + * @param <K> Type of key. + * @param <V> Type of value. 
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public abstract class ICacheBuilder<K, V> { + + private long maxWeightInBytes; + + private ToLongBiFunction<K, V> weigher; + + private TimeValue expireAfterAcess; + + private Settings settings; + + private RemovalListener<K, V> removalListener; + + public ICacheBuilder() {} + + public ICacheBuilder<K, V> setMaximumWeightInBytes(long sizeInBytes) { + this.maxWeightInBytes = sizeInBytes; + return this; + } + + public ICacheBuilder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { + this.weigher = weigher; + return this; + } + + public ICacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAcess) { + this.expireAfterAcess = expireAfterAcess; + return this; + } + + public ICacheBuilder<K, V> setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public ICacheBuilder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + public long getMaxWeightInBytes() { + return maxWeightInBytes; + } + + public TimeValue getExpireAfterAcess() { + return expireAfterAcess; + } + + public ToLongBiFunction<K, V> getWeigher() { + return weigher; + } + + public RemovalListener<K, V> getRemovalListener() { + return this.removalListener; + } + + public Settings getSettings() { + return settings; + } + + public abstract ICache<K, V> build(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java deleted file mode 100644 index fc5aa48aae90f..0000000000000 --- a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.store.builders; - -import org.opensearch.common.cache.store.StoreAwareCache; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; -import org.opensearch.common.unit.TimeValue; - -import java.util.function.ToLongBiFunction; - -/** - * Builder for store aware cache. - * @param <K> Type of key. - * @param <V> Type of value. 
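A store-specific builder only has to supply build(); everything else comes from ICacheBuilder. A small usage sketch against the on-heap builder (demo values arbitrary, listener a no-op):

```java
import org.opensearch.common.cache.ICache;
import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
import org.opensearch.common.unit.TimeValue;

public class BuilderUsageDemo {
    public static void main(String[] args) {
        ICache<String, String> cache = new OpenSearchOnHeapCache.Builder<String, String>()
            .setMaximumWeightInBytes(10_000)
            .setExpireAfterAccess(TimeValue.timeValueMinutes(5))
            .setWeigher((k, v) -> 1L)        // every entry weighs 1 byte in this demo
            .setRemovalListener(notification -> {})
            .build();
        cache.put("k", "v");
        System.out.println(cache.get("k"));
    }
}
```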
diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java
deleted file mode 100644
index fc5aa48aae90f..0000000000000
--- a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.common.cache.store.builders;
-
-import org.opensearch.common.cache.store.StoreAwareCache;
-import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener;
-import org.opensearch.common.unit.TimeValue;
-
-import java.util.function.ToLongBiFunction;
-
-/**
- * Builder for store aware cache.
- * @param <K> Type of key.
- * @param <V> Type of value.
- *
- * @opensearch.internal
- */
-public abstract class StoreAwareCacheBuilder<K, V> {
-
-    private long maxWeightInBytes;
-
-    private ToLongBiFunction<K, V> weigher;
-
-    private TimeValue expireAfterAcess;
-
-    private StoreAwareCacheEventListener<K, V> eventListener;
-
-    public StoreAwareCacheBuilder() {}
-
-    public StoreAwareCacheBuilder<K, V> setMaximumWeightInBytes(long sizeInBytes) {
-        this.maxWeightInBytes = sizeInBytes;
-        return this;
-    }
-
-    public StoreAwareCacheBuilder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) {
-        this.weigher = weigher;
-        return this;
-    }
-
-    public StoreAwareCacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAcess) {
-        this.expireAfterAcess = expireAfterAcess;
-        return this;
-    }
-
-    public StoreAwareCacheBuilder<K, V> setEventListener(StoreAwareCacheEventListener<K, V> eventListener) {
-        this.eventListener = eventListener;
-        return this;
-    }
-
-    public long getMaxWeightInBytes() {
-        return maxWeightInBytes;
-    }
-
-    public TimeValue getExpireAfterAcess() {
-        return expireAfterAcess;
-    }
-
-    public ToLongBiFunction<K, V> getWeigher() {
-        return weigher;
-    }
-
-    public StoreAwareCacheEventListener<K, V> getEventListener() {
-        return eventListener;
-    }
-
-    public abstract StoreAwareCache<K, V> build();
-}
diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java
new file mode 100644
index 0000000000000..4c9881e845d42
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java
@@ -0,0 +1,198 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.cache.store.config;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.cache.RemovalListener;
+import org.opensearch.common.cache.policy.CachedQueryResult;
+import org.opensearch.common.cache.serializer.Serializer;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+
+import java.util.function.Function;
+import java.util.function.ToLongBiFunction;
+
+/**
+ * Common configurations related to store aware caches.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public class CacheConfig<K, V> {
+
+    private final Settings settings;
+
+    /**
+     * Defines the key type.
+     */
+    private final Class<K> keyType;
+
+    /**
+     * Defines the value type.
+     */
+    private final Class<V> valueType;
+
+    /**
+     * Represents a function that calculates the size or weight of a key-value pair.
+     */
+    private final ToLongBiFunction<K, V> weigher;
+
+    private final RemovalListener<K, V> removalListener;
+
+    // Serializers for keys and values. Not required for all caches.
+    private final Serializer<K, ?> keySerializer;
+    private final Serializer<V, ?> valueSerializer;
+
+    /** A function which extracts policy-relevant information, such as took time, from values, to allow inspection by policies if present. */
+    private Function<V, CachedQueryResult.PolicyValues> cachedResultParser;
+
+    /**
+     * Max size in bytes for the cache. This is needed for backward compatibility.
+     */
+    private final long maxSizeInBytes;
+
+    /**
+     * Defines the expiration time for a cache entry. This is needed for backward compatibility.
+     */
+    private final TimeValue expireAfterAccess;
+
+    private CacheConfig(Builder<K, V> builder) {
+        this.keyType = builder.keyType;
+        this.valueType = builder.valueType;
+        this.settings = builder.settings;
+        this.removalListener = builder.removalListener;
+        this.weigher = builder.weigher;
+        this.keySerializer = builder.keySerializer;
+        this.valueSerializer = builder.valueSerializer;
+        this.cachedResultParser = builder.cachedResultParser;
+        this.maxSizeInBytes = builder.maxSizeInBytes;
+        this.expireAfterAccess = builder.expireAfterAccess;
+    }
+
+    public Class<K> getKeyType() {
+        return keyType;
+    }
+
+    public Class<V> getValueType() {
+        return valueType;
+    }
+
+    public Settings getSettings() {
+        return settings;
+    }
+
+    public RemovalListener<K, V> getRemovalListener() {
+        return removalListener;
+    }
+
+    public Serializer<K, ?> getKeySerializer() {
+        return keySerializer;
+    }
+
+    public Serializer<V, ?> getValueSerializer() {
+        return valueSerializer;
+    }
+
+    public ToLongBiFunction<K, V> getWeigher() {
+        return weigher;
+    }
+
+    public Function<V, CachedQueryResult.PolicyValues> getCachedResultParser() {
+        return cachedResultParser;
+    }
+
+    public Long getMaxSizeInBytes() {
+        return maxSizeInBytes;
+    }
+
+    public TimeValue getExpireAfterAccess() {
+        return expireAfterAccess;
+    }
+
+    /**
+     * Builder class to build Cache config related parameters.
+     * @param <K> Type of key.
+     * @param <V> Type of value.
+     */
+    public static class Builder<K, V> {
+
+        private Settings settings;
+
+        private Class<K> keyType;
+
+        private Class<V> valueType;
+
+        private RemovalListener<K, V> removalListener;
+
+        private Serializer<K, ?> keySerializer;
+        private Serializer<V, ?> valueSerializer;
+
+        private ToLongBiFunction<K, V> weigher;
+        private Function<V, CachedQueryResult.PolicyValues> cachedResultParser;
+
+        private long maxSizeInBytes;
+
+        private TimeValue expireAfterAccess;
+
+        public Builder() {}
+
+        public Builder<K, V> setSettings(Settings settings) {
+            this.settings = settings;
+            return this;
+        }
+
+        public Builder<K, V> setKeyType(Class<K> keyType) {
+            this.keyType = keyType;
+            return this;
+        }
+
+        public Builder<K, V> setValueType(Class<V> valueType) {
+            this.valueType = valueType;
+            return this;
+        }
+
+        public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) {
+            this.removalListener = removalListener;
+            return this;
+        }
+
+        public Builder<K, V> setKeySerializer(Serializer<K, ?> keySerializer) {
+            this.keySerializer = keySerializer;
+            return this;
+        }
+
+        public Builder<K, V> setValueSerializer(Serializer<V, ?> valueSerializer) {
+            this.valueSerializer = valueSerializer;
+            return this;
+        }
+
+        public Builder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) {
+            this.weigher = weigher;
+            return this;
+        }
+
+        public Builder<K, V> setCachedResultParser(Function<V, CachedQueryResult.PolicyValues> function) {
+            this.cachedResultParser = function;
+            return this;
+        }
+
+        public Builder<K, V> setMaxSizeInBytes(long sizeInBytes) {
+            this.maxSizeInBytes = sizeInBytes;
+            return this;
+        }
+
+        public Builder<K, V> setExpireAfterAccess(TimeValue expireAfterAccess) {
+            this.expireAfterAccess = expireAfterAccess;
+            return this;
+        }
+
+        public CacheConfig<K, V> build() {
+            return new CacheConfig<>(this);
+        }
+    }
+}
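CacheConfig replaces the per-builder wiring of the deleted StoreAwareCacheBuilder with a single value object that cache factories can read from. A hedged usage sketch; the types, weights and empty settings are illustrative, not prescribed by this change:

import org.opensearch.common.cache.RemovalListener;
import org.opensearch.common.cache.RemovalNotification;
import org.opensearch.common.cache.store.config.CacheConfig;
import org.opensearch.common.settings.Settings;

public final class CacheConfigExample {
    public static CacheConfig<String, String> example() {
        RemovalListener<String, String> listener = (RemovalNotification<String, String> n) -> {
            // react to evictions/invalidations here
        };
        return new CacheConfig.Builder<String, String>()
            .setKeyType(String.class)
            .setValueType(String.class)
            .setSettings(Settings.EMPTY)                    // node settings; EMPTY for the sketch
            .setWeigher((k, v) -> k.length() + v.length())  // rough per-entry weight
            .setRemovalListener(listener)
            .setMaxSizeInBytes(10 * 1024 * 1024)            // backward-compatibility path
            .build();
    }
}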
diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java
new file mode 100644
index 0000000000000..6b662a8af3f9d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Base package for store aware cache config */
+package org.opensearch.common.cache.store.config;
diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java b/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java
deleted file mode 100644
index 6d7e4b39aaf9f..0000000000000
--- a/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.common.cache.store.listeners;
-
-import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification;
-import org.opensearch.common.cache.store.enums.CacheStoreType;
-
-/**
- * This can be used to listen to tiered caching events
- * @param <K> Type of key
- * @param <V> Type of value
- *
- * @opensearch.internal
- */
-public interface StoreAwareCacheEventListener<K, V> {
-
-    void onMiss(K key, CacheStoreType cacheStoreType);
-
-    void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification);
-
-    void onHit(K key, V value, CacheStoreType cacheStoreType);
-
-    void onCached(K key, V value, CacheStoreType cacheStoreType);
-}
diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
new file mode 100644
index 0000000000000..5a2964ad011bf
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.cache.store.settings;
+
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.unit.ByteSizeValue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.opensearch.common.settings.Setting.Property.NodeScope;
+
+/**
+ * Settings for OpenSearchOnHeapCache
+ */
+public class OpenSearchOnHeapCacheSettings {
+
+    /**
+     * Setting to define maximum size for the cache as a percentage of heap memory available.
+     *
+     * Setting pattern: {cache_type}.opensearch_onheap.size
+     */
+    public static final Setting.AffixSetting<ByteSizeValue> MAXIMUM_SIZE_IN_BYTES = Setting.suffixKeySetting(
+        OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ".size",
+        (key) -> Setting.memorySizeSetting(key, "1%", NodeScope)
+    );
+
+    /**
+     * Setting to define expire after access.
+     *
+     * Setting pattern: {cache_type}.opensearch_onheap.expire
+     */
+    public static final Setting.AffixSetting<TimeValue> EXPIRE_AFTER_ACCESS_SETTING = Setting.suffixKeySetting(
+        OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ".expire",
+        (key) -> Setting.positiveTimeSetting(key, TimeValue.MAX_VALUE, Setting.Property.NodeScope)
+    );
+
+    public static final String MAXIMUM_SIZE_IN_BYTES_KEY = "maximum_size_in_bytes";
+    public static final String EXPIRE_AFTER_ACCESS_KEY = "expire_after_access";
+
+    private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of(
+        MAXIMUM_SIZE_IN_BYTES_KEY,
+        MAXIMUM_SIZE_IN_BYTES,
+        EXPIRE_AFTER_ACCESS_KEY,
+        EXPIRE_AFTER_ACCESS_SETTING
+    );
+
+    public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap();
+
+    private static Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() {
+        Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>();
+        for (CacheType cacheType : CacheType.values()) {
+            Map<String, Setting<?>> settingMap = new HashMap<>();
+            for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) {
+                settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix()));
+            }
+            cacheTypeMap.put(cacheType, settingMap);
+        }
+        return cacheTypeMap;
+    }
+
+    public static Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) {
+        Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType);
+        if (cacheTypeSettings == null) {
+            throw new IllegalArgumentException(
+                "No settings exist for cache store name: "
+                    + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+                    + " associated with cache type: "
+                    + cacheType
+            );
+        }
+        return cacheTypeSettings;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java
new file mode 100644
index 0000000000000..91613876a5f31
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Base package for cache settings **/
+package org.opensearch.common.cache.store.settings;
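The CACHE_TYPE_MAP above exists so cache factories can resolve the concrete, per-cache-type setting at runtime. A hedged sketch of that lookup; the cast mirrors how callers consume the Setting<?> map, and the concrete key shown is what the suffix pattern should produce for the request cache, assuming its setting prefix is "indices.requests.cache":

import org.opensearch.common.cache.CacheType;
import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.common.unit.ByteSizeValue;

public final class OnHeapSettingsExample {
    @SuppressWarnings("unchecked")
    public static long maxSizeInBytes(Settings nodeSettings) {
        Setting<ByteSizeValue> sizeSetting = (Setting<ByteSizeValue>) OpenSearchOnHeapCacheSettings
            .getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
            .get(OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY);
        // Defaults to 1% of heap unless a concrete key such as
        // "indices.requests.cache.opensearch_onheap.size" is set.
        return sizeSetting.get(nodeSettings).getBytes();
    }
}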
diff --git a/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java b/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java
deleted file mode 100644
index 8b432c9484aed..0000000000000
--- a/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.common.cache.tier;
-
-import org.opensearch.common.cache.ICache;
-import org.opensearch.common.cache.LoadAwareCacheLoader;
-import org.opensearch.common.cache.RemovalReason;
-import org.opensearch.common.cache.store.StoreAwareCache;
-import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification;
-import org.opensearch.common.cache.store.StoreAwareCacheValue;
-import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder;
-import org.opensearch.common.cache.store.enums.CacheStoreType;
-import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener;
-import org.opensearch.common.util.concurrent.ReleasableLock;
-import org.opensearch.common.util.iterable.Iterables;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Function;
-
-/**
- * This cache spillover the evicted items from heap tier to disk tier. All the new items are first cached on heap
- * and the items evicted from on heap cache are moved to disk based cache. If disk based cache also gets full,
- * then items are eventually evicted from it and removed which will result in cache miss.
- *
- * @param <K> Type of key
- * @param <V> Type of value
- *
- * @opensearch.experimental
- */
-public class TieredSpilloverCache<K, V> implements ICache<K, V>, StoreAwareCacheEventListener<K, V> {
-
-    // TODO: Remove optional when diskCache implementation is integrated.
-    private final Optional<StoreAwareCache<K, V>> onDiskCache;
-    private final StoreAwareCache<K, V> onHeapCache;
-    private final StoreAwareCacheEventListener<K, V> listener;
-    ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-    ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock());
-    ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock());
-
-    /**
-     * Maintains caching tiers in ascending order of cache latency.
-     */
-    private final List<StoreAwareCache<K, V>> cacheList;
-
-    TieredSpilloverCache(Builder<K, V> builder) {
-        Objects.requireNonNull(builder.onHeapCacheBuilder, "onHeap cache builder can't be null");
-        this.onHeapCache = builder.onHeapCacheBuilder.setEventListener(this).build();
-        if (builder.onDiskCacheBuilder != null) {
-            this.onDiskCache = Optional.of(builder.onDiskCacheBuilder.setEventListener(this).build());
-        } else {
-            this.onDiskCache = Optional.empty();
-        }
-        this.listener = builder.listener;
-        this.cacheList = this.onDiskCache.map(diskTier -> Arrays.asList(this.onHeapCache, diskTier)).orElse(List.of(this.onHeapCache));
-    }
-
-    // Package private for testing
-    StoreAwareCache<K, V> getOnHeapCache() {
-        return onHeapCache;
-    }
-
-    // Package private for testing
-    Optional<StoreAwareCache<K, V>> getOnDiskCache() {
-        return onDiskCache;
-    }
-
-    @Override
-    public V get(K key) {
-        StoreAwareCacheValue<V> cacheValue = getValueFromTieredCache(true).apply(key);
-        if (cacheValue == null) {
-            return null;
-        }
-        return cacheValue.getValue();
-    }
-
-    @Override
-    public void put(K key, V value) {
-        try (ReleasableLock ignore = writeLock.acquire()) {
-            onHeapCache.put(key, value);
-            listener.onCached(key, value, CacheStoreType.ON_HEAP);
-        }
-    }
-
-    @Override
-    public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception {
-        // We are skipping calling event listeners at this step as we do another get inside below computeIfAbsent.
-        // Where we might end up calling onMiss twice for a key not present in onHeap cache.
-        // Similary we might end up calling both onMiss and onHit for a key, in case we are receiving concurrent
-        // requests for the same key which requires loading only once.
-        StoreAwareCacheValue<V> cacheValue = getValueFromTieredCache(false).apply(key);
-        if (cacheValue == null) {
-            // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside.
-            // This is needed as there can be many requests for the same key at the same time and we only want to load
-            // the value once.
-            V value = null;
-            try (ReleasableLock ignore = writeLock.acquire()) {
-                value = onHeapCache.computeIfAbsent(key, loader);
-            }
-            if (loader.isLoaded()) {
-                listener.onMiss(key, CacheStoreType.ON_HEAP);
-                onDiskCache.ifPresent(diskTier -> listener.onMiss(key, CacheStoreType.DISK));
-                listener.onCached(key, value, CacheStoreType.ON_HEAP);
-            } else {
-                listener.onHit(key, value, CacheStoreType.ON_HEAP);
-            }
-            return value;
-        }
-        listener.onHit(key, cacheValue.getValue(), cacheValue.getCacheStoreType());
-        if (cacheValue.getCacheStoreType().equals(CacheStoreType.DISK)) {
-            listener.onMiss(key, CacheStoreType.ON_HEAP);
-        }
-        return cacheValue.getValue();
-    }
-
-    @Override
-    public void invalidate(K key) {
-        // We are trying to invalidate the key from all caches though it would be present in only of them.
-        // Doing this as we don't know where it is located. We could do a get from both and check that, but what will
-        // also trigger a hit/miss listener event, so ignoring it for now.
-        try (ReleasableLock ignore = writeLock.acquire()) {
-            for (StoreAwareCache<K, V> storeAwareCache : cacheList) {
-                storeAwareCache.invalidate(key);
-            }
-        }
-    }
-
-    @Override
-    public void invalidateAll() {
-        try (ReleasableLock ignore = writeLock.acquire()) {
-            for (StoreAwareCache<K, V> storeAwareCache : cacheList) {
-                storeAwareCache.invalidateAll();
-            }
-        }
-    }
-
-    /**
-     * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache.
-     * @return An iterable over (onHeap + disk) keys
-     */
-    @Override
-    public Iterable<K> keys() {
-        Iterable<K> onDiskKeysIterable;
-        if (onDiskCache.isPresent()) {
-            onDiskKeysIterable = onDiskCache.get().keys();
-        } else {
-            onDiskKeysIterable = Collections::emptyIterator;
-        }
-        return Iterables.concat(onHeapCache.keys(), onDiskKeysIterable);
-    }
-
-    @Override
-    public long count() {
-        long totalCount = 0;
-        for (StoreAwareCache<K, V> storeAwareCache : cacheList) {
-            totalCount += storeAwareCache.count();
-        }
-        return totalCount;
-    }
-
-    @Override
-    public void refresh() {
-        try (ReleasableLock ignore = writeLock.acquire()) {
-            for (StoreAwareCache<K, V> storeAwareCache : cacheList) {
-                storeAwareCache.refresh();
-            }
-        }
-    }
-
-    @Override
-    public void onMiss(K key, CacheStoreType cacheStoreType) {
-        // Misses for tiered cache are tracked here itself.
-    }
-
-    @Override
-    public void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification) {
-        if (RemovalReason.EVICTED.equals(notification.getRemovalReason())
-            || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) {
-            switch (notification.getCacheStoreType()) {
-                case ON_HEAP:
-                    try (ReleasableLock ignore = writeLock.acquire()) {
-                        onDiskCache.ifPresent(diskTier -> { diskTier.put(notification.getKey(), notification.getValue()); });
-                    }
-                    onDiskCache.ifPresent(
-                        diskTier -> listener.onCached(notification.getKey(), notification.getValue(), CacheStoreType.DISK)
-                    );
-                    break;
-                default:
-                    break;
-            }
-        }
-        listener.onRemoval(notification);
-    }
-
-    @Override
-    public void onHit(K key, V value, CacheStoreType cacheStoreType) {
-        // Hits for tiered cache are tracked here itself.
-    }
-
-    @Override
-    public void onCached(K key, V value, CacheStoreType cacheStoreType) {
-        // onCached events for tiered cache are tracked here itself.
-    }
-
-    private Function<K, StoreAwareCacheValue<V>> getValueFromTieredCache(boolean triggerEventListener) {
-        return key -> {
-            try (ReleasableLock ignore = readLock.acquire()) {
-                for (StoreAwareCache<K, V> storeAwareCache : cacheList) {
-                    V value = storeAwareCache.get(key);
-                    if (value != null) {
-                        if (triggerEventListener) {
-                            listener.onHit(key, value, storeAwareCache.getTierType());
-                        }
-                        return new StoreAwareCacheValue<>(value, storeAwareCache.getTierType());
-                    } else {
-                        if (triggerEventListener) {
-                            listener.onMiss(key, storeAwareCache.getTierType());
-                        }
-                    }
-                }
-            }
-            return null;
-        };
-    }
-
-    /**
-     * Builder object for tiered spillover cache.
-     * @param <K> Type of key
-     * @param <V> Type of value
-     */
-    public static class Builder<K, V> {
-        private StoreAwareCacheBuilder<K, V> onHeapCacheBuilder;
-        private StoreAwareCacheBuilder<K, V> onDiskCacheBuilder;
-        private StoreAwareCacheEventListener<K, V> listener;
-
-        public Builder() {}
-
-        public Builder<K, V> setOnHeapCacheBuilder(StoreAwareCacheBuilder<K, V> onHeapCacheBuilder) {
-            this.onHeapCacheBuilder = onHeapCacheBuilder;
-            return this;
-        }
-
-        public Builder<K, V> setOnDiskCacheBuilder(StoreAwareCacheBuilder<K, V> onDiskCacheBuilder) {
-            this.onDiskCacheBuilder = onDiskCacheBuilder;
-            return this;
-        }
-
-        public Builder<K, V> setListener(StoreAwareCacheEventListener<K, V> listener) {
-            this.listener = listener;
-            return this;
-        }
-
-        public TieredSpilloverCache<K, V> build() {
-            return new TieredSpilloverCache<>(this);
-        }
-    }
-}
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java
index 90350c0a21a42..5aff09d715622 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java
@@ -45,6 +45,7 @@
 import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.opensearch.Version;
@@ -302,6 +303,11 @@ public DocIdSetIterator iterator() {
         return subQueryScorer.iterator();
     }
 
+    @Override
+    public TwoPhaseIterator twoPhaseIterator() {
+        return subQueryScorer.twoPhaseIterator();
+    }
+
     @Override
     public float getMaxScore(int upTo) {
         return Float.MAX_VALUE; // TODO: what would be a good upper bound?
diff --git a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java
index bb273b14c42e2..1804a9ac05a29 100644
--- a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java
+++ b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java
@@ -144,7 +144,7 @@ public long readLong(long pos) throws IOException {
     }
 
     private void validatePos(long pos, int len) throws EOFException {
-        if (pos < 0 || pos + len > length + offset) {
+        if (pos < 0 || pos + len > length) {
             throw new EOFException("seek past EOF");
         }
     }
diff --git a/server/src/main/java/org/opensearch/common/path/PathTrie.java b/server/src/main/java/org/opensearch/common/path/PathTrie.java
index 7cb7b46acfafe..0b516fa037c48 100644
--- a/server/src/main/java/org/opensearch/common/path/PathTrie.java
+++ b/server/src/main/java/org/opensearch/common/path/PathTrie.java
@@ -37,6 +37,7 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.NoSuchElementException;
+import java.util.Stack;
 import java.util.function.BiFunction;
 import java.util.function.Supplier;
 
@@ -405,4 +406,45 @@ public T next() {
             }
         };
     }
+
+    public Iterator<T> retrieveAll() {
+        Stack<TrieNode> stack = new Stack<>();
+        stack.add(root);
+
+        return new Iterator<T>() {
+            @Override
+            public boolean hasNext() {
+                while (!stack.empty()) {
+                    TrieNode node = stack.peek();
+
+                    if (node.value != null) {
+                        return true;
+                    }
+
+                    advance();
+                }
+
+                return false;
+            }
+
+            @Override
+            public T next() {
+                while (!stack.empty()) {
+                    TrieNode node = advance();
+
+                    if (node.value != null) {
+                        return node.value;
+                    }
+                }
+
+                throw new NoSuchElementException("called next() without validating hasNext()! no more nodes available");
+            }
+
+            private TrieNode advance() {
+                TrieNode node = stack.pop();
+                stack.addAll(node.children.values());
+                return node;
+            }
+        };
+    }
 }
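The new retrieveAll() gives a lazy depth-first iteration over every value in the trie without materializing paths. A hedged usage sketch; the handler names are illustrative, and RestUtils.REST_DECODER is the decoder normally paired with PathTrie:

import java.util.Iterator;

import org.opensearch.common.path.PathTrie;
import org.opensearch.rest.RestUtils;

public final class PathTrieExample {
    public static void main(String[] args) {
        PathTrie<String> trie = new PathTrie<>(RestUtils.REST_DECODER);
        trie.insert("/_cat/nodes", "cat-nodes-handler");
        trie.insert("/{index}/_search", "search-handler");

        Iterator<String> all = trie.retrieveAll();
        while (all.hasNext()) {             // always guard with hasNext();
            System.out.println(all.next()); // next() throws NoSuchElementException when exhausted
        }
    }
}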
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 277286ae1ff1b..6b4be45929553 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -81,6 +81,8 @@
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.network.NetworkService;
@@ -138,6 +140,7 @@
 import org.opensearch.plugins.PluginsService;
 import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
 import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings;
+import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings;
 import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.rest.BaseRestHandler;
 import org.opensearch.script.ScriptService;
@@ -293,6 +296,7 @@ public void apply(Settings value, Settings current, Settings previous) {
             RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING,
             RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING,
             RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING,
+            RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT,
             RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING,
             ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
             ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING,
@@ -666,6 +670,7 @@ public void apply(Settings value, Settings current, Settings previous) {
             // Settings related to resource trackers
             ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING,
             ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING,
+            ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING,
 
             // Settings related to Searchable Snapshots
             Node.NODE_SEARCH_CACHE_SIZE_SETTING,
@@ -697,13 +702,20 @@ public void apply(Settings value, Settings current, Settings previous) {
             RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING,
             RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING,
             RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING,
+            RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING,
             IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING,
             IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING,
             AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
             CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE,
             CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT,
             CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT,
-            IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING
+            IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE,
+            IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT,
+            IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT,
+            IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING,
+            // Concurrent segment search settings
+            SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING,
+            SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING
             )
         )
     );
@@ -716,11 +728,6 @@ public void apply(Settings value, Settings current, Settings previous) {
      * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}.
      */
     public static final Map<List<String>, List<Setting>> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of(
-        List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH),
-        List.of(
-            SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING,
-            SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING
-        ),
         List.of(FeatureFlags.TELEMETRY),
         List.of(
             TelemetrySettings.TRACER_ENABLED_SETTING,
@@ -728,6 +735,8 @@ public void apply(Settings value, Settings current, Settings previous) {
             TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING,
             TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING,
             TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING
-        )
+        ),
+        List.of(FeatureFlags.PLUGGABLE_CACHE),
+        List.of(CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE))
     );
 }
diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
index e19f8e8370d5b..985eb40711e16 100644
--- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
@@ -32,9 +32,10 @@ protected FeatureFlagSettings(
     public static final Set<Setting<?>> BUILT_IN_FEATURE_FLAGS = Set.of(
         FeatureFlags.EXTENSIONS_SETTING,
         FeatureFlags.IDENTITY_SETTING,
-        FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING,
         FeatureFlags.TELEMETRY_SETTING,
         FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING,
-        FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING
+        FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING,
+        FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING,
+        FeatureFlags.PLUGGABLE_CACHE_SETTING
     );
 }
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index b34a2aaffe408..49bb3abf1decd 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -41,7 +41,6 @@
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.settings.Setting.Property;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.IndexSortConfig;
@@ -150,6 +149,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING,
         IndexSettings.MAX_ANALYZED_OFFSET_SETTING,
         IndexSettings.MAX_TERMS_COUNT_SETTING,
+        IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING,
         IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING,
         IndexSettings.DEFAULT_FIELD_SETTING,
         IndexSettings.QUERY_STRING_LENIENT_SETTING,
@@ -230,6 +230,12 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING,
         IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING,
 
+        IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING,
+        IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING,
+
+        // Settings for concurrent segment search
+        IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING,
+
         // validate that built-in similarities don't get redefined
         Setting.groupSetting("index.similarity.", (s) -> {
             Map<String, Settings> groups = s.getAsGroups();
@@ -252,10 +258,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
      * is ready for production release, the feature flag can be removed, and the
      * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}.
      */
-    public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of(
-        FeatureFlags.CONCURRENT_SEGMENT_SEARCH,
-        List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING)
-    );
+    public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of();
 
     public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS);
diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java
index 0e96edff0681c..fea4c165809ba 100644
--- a/server/src/main/java/org/opensearch/common/settings/Setting.java
+++ b/server/src/main/java/org/opensearch/common/settings/Setting.java
@@ -978,6 +978,9 @@ private Setting<T> getConcreteSetting(String namespace, String key) {
      * Get a setting with the given namespace filled in for prefix and suffix.
      */
     public Setting<T> getConcreteSettingForNamespace(String namespace) {
+        if (namespace == null) {
+            throw new IllegalArgumentException("Namespace should not be null");
+        }
         String fullKey = key.toConcreteKey(namespace).toString();
         return getConcreteSetting(namespace, fullKey);
     }
@@ -2804,6 +2807,12 @@ public static <T> AffixSetting<T> prefixKeySetting(String prefix, Function<Strin
         return affixKeySetting(new AffixKey(prefix), delegateFactoryWithNamespace);
     }
 
+    public static <T> AffixSetting<T> suffixKeySetting(String suffix, Function<String, Setting<T>> delegateFactory) {
+        BiFunction<String, String, Setting<T>> delegateFactoryWithNamespace = (ns, k) -> delegateFactory.apply(k);
+        AffixKey affixKey = new AffixKey(null, suffix);
+        return affixKeySetting(affixKey, delegateFactoryWithNamespace);
+    }
+
     /**
      * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance
      * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters
@@ -2943,12 +2952,14 @@ public static final class AffixKey implements Key {
             assert prefix != null || suffix != null : "Either prefix or suffix must be non-null";
 
             this.prefix = prefix;
-            if (prefix.endsWith(".") == false) {
+            if (prefix != null && prefix.endsWith(".") == false) {
                 throw new IllegalArgumentException("prefix must end with a '.'");
             }
             this.suffix = suffix;
             if (suffix == null) {
                 pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))");
+            } else if (prefix == null) {
+                pattern = Pattern.compile("((?:[-\\w]+[.])*[-\\w]+\\." + Pattern.quote(suffix) + ")");
             } else {
                 // the last part of this regexp is to support both list and group keys
                 pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." + Pattern.quote(suffix) + ")(?:\\..*)?");
diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java
index e74ab687b903b..527dce7677dd8 100644
--- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java
+++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java
@@ -1299,6 +1299,41 @@ public class DateFormatters {
         .withResolverStyle(ResolverStyle.STRICT)
     );
 
+    /**
+     * Returns RFC 3339, a popular ISO 8601 profile-compatible date time formatter and parser.
+     * This is not fully compatible with the existing spec; it is more lenient and closely follows the W3C note on datetime.
+     */
+
+    public static final DateFormatter RFC3339_LENIENT_DATE_FORMATTER = new JavaDateFormatter(
+        "rfc3339_lenient",
+        new OpenSearchDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_PRINTER),
+        new RFC3339CompatibleDateTimeFormatter(
+            new DateTimeFormatterBuilder().append(DATE_FORMATTER)
+                .optionalStart()
+                .appendLiteral('T')
+                .appendValue(HOUR_OF_DAY, 1, 2, SignStyle.NOT_NEGATIVE)
+                .appendLiteral(':')
+                .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE)
+                .optionalStart()
+                .appendLiteral(':')
+                .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE)
+                .optionalStart()
+                .appendFraction(NANO_OF_SECOND, 1, 9, true)
+                .optionalEnd()
+                .optionalStart()
+                .appendLiteral(',')
+                .appendFraction(NANO_OF_SECOND, 1, 9, false)
+                .optionalEnd()
+                .optionalStart()
+                .appendOffsetId()
+                .optionalEnd()
+                .optionalEnd()
+                .optionalEnd()
+                .toFormatter(Locale.ROOT)
+                .withResolverStyle(ResolverStyle.STRICT)
+        )
+    );
+
     private static final DateTimeFormatter HOUR_MINUTE_SECOND_FORMATTER = new DateTimeFormatterBuilder().append(HOUR_MINUTE_FORMATTER)
         .appendLiteral(":")
         .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE)
@@ -2152,6 +2187,8 @@ static DateFormatter forPattern(String input) {
             return STRICT_YEAR_MONTH;
         } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) {
             return STRICT_YEAR_MONTH_DAY;
+        } else if (FormatNames.RFC3339_LENIENT.matches(input)) {
+            return RFC3339_LENIENT_DATE_FORMATTER;
         } else {
             try {
                 return new JavaDateFormatter(
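With RFC3339_LENIENT wired into forPattern above, callers reach the format through the usual DateFormatter lookup. A small sketch of inputs the lenient profile is designed to accept, per the RFC3339CompatibleDateTimeFormatter javadoc later in this patch; the exact outputs are illustrative:

import java.time.temporal.TemporalAccessor;

import org.opensearch.common.time.DateFormatter;

public final class Rfc3339Example {
    public static void main(String[] args) {
        DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient");
        TemporalAccessor a = formatter.parse("1997-07-16T19:20:30.45+01:00"); // '.' fraction
        TemporalAccessor b = formatter.parse("1997-07-16T19:20:30,45+01:00"); // ',' fraction
        TemporalAccessor c = formatter.parse("1997-07-16t19:20:30z");         // lower-case 't'/'z'
        System.out.println(a + " " + b + " " + c);
    }
}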
diff --git a/server/src/main/java/org/opensearch/common/time/EpochTime.java b/server/src/main/java/org/opensearch/common/time/EpochTime.java
index 19e70fbc2202d..e5364d0b84a9f 100644
--- a/server/src/main/java/org/opensearch/common/time/EpochTime.java
+++ b/server/src/main/java/org/opensearch/common/time/EpochTime.java
@@ -126,7 +126,12 @@ public boolean isSupportedBy(TemporalAccessor temporal) {
 
         @Override
         public long getFrom(TemporalAccessor temporal) {
-            long instantSecondsInMillis = temporal.getLong(ChronoField.INSTANT_SECONDS) * 1_000;
+            long instantSeconds = temporal.getLong(ChronoField.INSTANT_SECONDS);
+            if (instantSeconds < Long.MIN_VALUE / 1000L || instantSeconds > Long.MAX_VALUE / 1000L) {
+                // Multiplying would yield integer overflow
+                return Long.MAX_VALUE;
+            }
+            long instantSecondsInMillis = instantSeconds * 1_000;
             if (instantSecondsInMillis >= 0) {
                 if (temporal.isSupported(ChronoField.NANO_OF_SECOND)) {
                     return instantSecondsInMillis + (temporal.getLong(ChronoField.NANO_OF_SECOND) / 1_000_000);
diff --git a/server/src/main/java/org/opensearch/common/time/FormatNames.java b/server/src/main/java/org/opensearch/common/time/FormatNames.java
index ba0a8fcf4a17a..ec5e825fc933e 100644
--- a/server/src/main/java/org/opensearch/common/time/FormatNames.java
+++ b/server/src/main/java/org/opensearch/common/time/FormatNames.java
@@ -44,6 +44,7 @@
  */
 public enum FormatNames {
     ISO8601(null, "iso8601"),
+    RFC3339_LENIENT(null, "rfc3339_lenient"),
     BASIC_DATE("basicDate", "basic_date"),
     BASIC_DATE_TIME("basicDateTime", "basic_date_time"),
     BASIC_DATE_TIME_NO_MILLIS("basicDateTimeNoMillis", "basic_date_time_no_millis"),
diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
index f711b14aeb928..033ea280e6172 100644
--- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
+++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
@@ -36,6 +36,7 @@
 import org.opensearch.core.common.Strings;
 
 import java.text.ParsePosition;
+import java.time.DateTimeException;
 import java.time.ZoneId;
 import java.time.format.DateTimeFormatter;
 import java.time.format.DateTimeFormatterBuilder;
@@ -52,7 +53,6 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 
@@ -70,11 +70,11 @@ class JavaDateFormatter implements DateFormatter {
 
     private final String format;
     private final String printFormat;
-    private final DateTimeFormatter printer;
-    private final List<DateTimeFormatter> parsers;
+    private final OpenSearchDateTimePrinter printer;
+    private final List<OpenSearchDateTimeFormatter> parsers;
     private final JavaDateFormatter roundupParser;
     private final Boolean canCacheLastParsedFormatter;
-    private volatile DateTimeFormatter lastParsedformatter = null;
+    private volatile OpenSearchDateTimeFormatter lastParsedformatter = null;
 
     /**
      * A round up formatter
@@ -83,11 +83,11 @@ class JavaDateFormatter implements DateFormatter {
      */
     static class RoundUpFormatter extends JavaDateFormatter {
 
-        RoundUpFormatter(String format, List<DateTimeFormatter> roundUpParsers) {
+        RoundUpFormatter(String format, List<OpenSearchDateTimeFormatter> roundUpParsers) {
             super(format, firstFrom(roundUpParsers), null, roundUpParsers);
         }
 
-        private static DateTimeFormatter firstFrom(List<DateTimeFormatter> roundUpParsers) {
+        private static OpenSearchDateTimeFormatter firstFrom(List<OpenSearchDateTimeFormatter> roundUpParsers) {
             return roundUpParsers.get(0);
         }
 
@@ -101,14 +101,18 @@ JavaDateFormatter getRoundupParser() {
     JavaDateFormatter(
         String format,
         String printFormat,
-        DateTimeFormatter printer,
+        OpenSearchDateTimePrinter printer,
         Boolean canCacheLastParsedFormatter,
-        DateTimeFormatter... parsers
+        OpenSearchDateTimeFormatter... parsers
     ) {
         this(format, printFormat, printer, ROUND_UP_BASE_FIELDS, canCacheLastParsedFormatter, parsers);
     }
 
     JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) {
+        this(format, format, wrapFormatter(printer), false, wrapAllFormatters(parsers));
+    }
+
+    JavaDateFormatter(String format, OpenSearchDateTimePrinter printer, OpenSearchDateTimeFormatter... parsers) {
         this(format, format, printer, false, parsers);
     }
 
@@ -127,19 +131,19 @@ JavaDateFormatter getRoundupParser() {
     JavaDateFormatter(
         String format,
         String printFormat,
-        DateTimeFormatter printer,
+        OpenSearchDateTimePrinter printer,
         BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer,
         Boolean canCacheLastParsedFormatter,
-        DateTimeFormatter... parsers
+        OpenSearchDateTimeFormatter... parsers
     ) {
         if (printer == null) {
             throw new IllegalArgumentException("printer may not be null");
         }
-        long distinctZones = Arrays.stream(parsers).map(DateTimeFormatter::getZone).distinct().count();
+        long distinctZones = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getZone).distinct().count();
         if (distinctZones > 1) {
             throw new IllegalArgumentException("formatters must have the same time zone");
         }
-        long distinctLocales = Arrays.stream(parsers).map(DateTimeFormatter::getLocale).distinct().count();
+        long distinctLocales = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getLocale).distinct().count();
         if (distinctLocales > 1) {
             throw new IllegalArgumentException("formatters must have the same locale");
         }
@@ -149,12 +153,12 @@ JavaDateFormatter getRoundupParser() {
         this.canCacheLastParsedFormatter = canCacheLastParsedFormatter;
 
         if (parsers.length == 0) {
-            this.parsers = Collections.singletonList(printer);
+            this.parsers = Collections.singletonList((OpenSearchDateTimeFormatter) printer);
         } else {
             this.parsers = Arrays.asList(parsers);
         }
         List<DateTimeFormatter> roundUp = createRoundUpParser(format, roundupParserConsumer);
-        this.roundupParser = new RoundUpFormatter(format, roundUp);
+        this.roundupParser = new RoundUpFormatter(format, wrapAllFormatters(roundUp));
     }
 
     JavaDateFormatter(
@@ -163,7 +167,7 @@ JavaDateFormatter getRoundupParser() {
         BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer,
         DateTimeFormatter... parsers
     ) {
-        this(format, format, printer, roundupParserConsumer, false, parsers);
+        this(format, format, wrapFormatter(printer), roundupParserConsumer, false, wrapAllFormatters(parsers));
     }
 
     /**
@@ -181,7 +185,8 @@ private List<DateTimeFormatter> createRoundUpParser(
     ) {
         if (format.contains("||") == false) {
             List<DateTimeFormatter> roundUpParsers = new ArrayList<>();
-            for (DateTimeFormatter parser : this.parsers) {
+            for (OpenSearchDateTimeFormatter customparser : this.parsers) {
+                DateTimeFormatter parser = customparser.getFormatter();
                 DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
                 builder.append(parser);
                 roundupParserConsumer.accept(builder, parser);
@@ -201,12 +206,12 @@ public static DateFormatter combined(
         assert formatters.size() > 0;
         assert printFormatter != null;
 
-        List<DateTimeFormatter> parsers = new ArrayList<>(formatters.size());
-        List<DateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size());
+        List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>(formatters.size());
+        List<OpenSearchDateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size());
 
         assert printFormatter instanceof JavaDateFormatter;
         JavaDateFormatter javaPrintFormatter = (JavaDateFormatter) printFormatter;
-        DateTimeFormatter printer = javaPrintFormatter.getPrinter();
+        OpenSearchDateTimePrinter printer = javaPrintFormatter.getPrinter();
         for (DateFormatter formatter : formatters) {
             assert formatter instanceof JavaDateFormatter;
             JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter;
@@ -227,9 +232,9 @@ public static DateFormatter combined(
     private JavaDateFormatter(
         String format,
         String printFormat,
-        DateTimeFormatter printer,
-        List<DateTimeFormatter> roundUpParsers,
-        List<DateTimeFormatter> parsers,
+        OpenSearchDateTimePrinter printer,
+        List<OpenSearchDateTimeFormatter> roundUpParsers,
+        List<OpenSearchDateTimeFormatter> parsers,
         Boolean canCacheLastParsedFormatter
     ) {
         this.format = format;
@@ -245,6 +250,15 @@ private JavaDateFormatter(
         DateTimeFormatter printer,
         List<DateTimeFormatter> roundUpParsers,
         List<DateTimeFormatter> parsers
+    ) {
+        this(format, format, wrapFormatter(printer), wrapAllFormatters(roundUpParsers), wrapAllFormatters(parsers), false);
+    }
+
+    private JavaDateFormatter(
+        String format,
+        OpenSearchDateTimePrinter printer,
+        List<OpenSearchDateTimeFormatter> roundUpParsers,
+        List<OpenSearchDateTimeFormatter> parsers
     ) {
         this(format, format, printer, roundUpParsers, parsers, false);
     }
@@ -253,7 +267,7 @@ JavaDateFormatter getRoundupParser() {
         return roundupParser;
     }
 
-    DateTimeFormatter getPrinter() {
+    OpenSearchDateTimePrinter getPrinter() {
         return printer;
     }
 
@@ -265,7 +279,7 @@ public TemporalAccessor parse(String input) {
 
         try {
             return doParse(input);
-        } catch (DateTimeParseException e) {
+        } catch (DateTimeException e) {
             throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e);
         }
     }
@@ -289,14 +303,14 @@ private TemporalAccessor doParse(String input) {
         Object object = null;
         if (canCacheLastParsedFormatter && lastParsedformatter != null) {
             ParsePosition pos = new ParsePosition(0);
-            object = lastParsedformatter.toFormat().parseObject(input, pos);
+            object = lastParsedformatter.parseObject(input, pos);
             if (parsingSucceeded(object, input, pos)) {
                 return (TemporalAccessor) object;
             }
         }
-        for (DateTimeFormatter formatter : parsers) {
+        for (OpenSearchDateTimeFormatter formatter : parsers) {
             ParsePosition pos = new ParsePosition(0);
-            object = formatter.toFormat().parseObject(input, pos);
+            object = formatter.parseObject(input, pos);
             if (parsingSucceeded(object, input, pos)) {
                 lastParsedformatter = formatter;
                 return (TemporalAccessor) object;
@@ -312,16 +326,28 @@ private boolean parsingSucceeded(Object object, String input, ParsePosition pos) {
         return object != null && pos.getIndex() == input.length();
     }
 
+    private static OpenSearchDateTimeFormatter wrapFormatter(DateTimeFormatter formatter) {
+        return new OpenSearchDateTimeFormatter(formatter);
+    }
+
+    private static OpenSearchDateTimeFormatter[] wrapAllFormatters(DateTimeFormatter... formatters) {
+        return Arrays.stream(formatters).map(JavaDateFormatter::wrapFormatter).toArray(OpenSearchDateTimeFormatter[]::new);
+    }
+
+    private static List<OpenSearchDateTimeFormatter> wrapAllFormatters(List<DateTimeFormatter> formatters) {
+        return formatters.stream().map(JavaDateFormatter::wrapFormatter).collect(Collectors.toList());
+    }
+
     @Override
     public DateFormatter withZone(ZoneId zoneId) {
         // shortcurt to not create new objects unnecessarily
         if (zoneId.equals(zone())) {
             return this;
         }
-        List<DateTimeFormatter> parsers = new CopyOnWriteArrayList<>(
+        List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>(
             this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList())
         );
-        List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers()
+        List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers()
             .stream()
             .map(p -> p.withZone(zoneId))
             .collect(Collectors.toList());
@@ -334,10 +360,10 @@ public DateFormatter withLocale(Locale locale) {
         if (locale.equals(locale())) {
             return this;
         }
-        List<DateTimeFormatter> parsers = new CopyOnWriteArrayList<>(
+        List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>(
            this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList())
        );
-        List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers()
+        List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers()
             .stream()
             .map(p -> p.withLocale(locale))
             .collect(Collectors.toList());
@@ -396,7 +422,7 @@ public String toString() {
         return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale());
     }
 
-    Collection<DateTimeFormatter> getParsers() {
+    Collection<OpenSearchDateTimeFormatter> getParsers() {
         return parsers;
     }
 }
diff --git a/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java
new file mode 100644
index 0000000000000..3a629d8843949
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.time;
+
+import java.text.Format;
+import java.text.ParsePosition;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.time.temporal.TemporalAccessor;
+import java.time.temporal.TemporalQuery;
+import java.util.Locale;
+
+/**
+ * Wrapper class for {@link java.time.format.DateTimeFormatter} to allow for
+ * custom implementations of datetime parsing/formatting.
+ */
+class OpenSearchDateTimeFormatter implements OpenSearchDateTimePrinter {
+    private final DateTimeFormatter formatter;
+
+    public OpenSearchDateTimeFormatter(String pattern) {
+        this.formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
+    }
+
+    public OpenSearchDateTimeFormatter(String pattern, Locale locale) {
+        this.formatter = DateTimeFormatter.ofPattern(pattern, locale);
+    }
+
+    public OpenSearchDateTimeFormatter(DateTimeFormatter formatter) {
+        this.formatter = formatter;
+    }
+
+    public OpenSearchDateTimeFormatter withLocale(Locale locale) {
+        return new OpenSearchDateTimeFormatter(getFormatter().withLocale(locale));
+    }
+
+    public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) {
+        return new OpenSearchDateTimeFormatter(getFormatter().withZone(zoneId));
+    }
+
+    public String format(TemporalAccessor temporal) {
+        return this.getFormatter().format(temporal);
+    }
+
+    public TemporalAccessor parse(CharSequence text, ParsePosition position) {
+        return this.getFormatter().parse(text, position);
+    }
+
+    public TemporalAccessor parse(CharSequence text) {
+        return this.getFormatter().parse(text);
+    }
+
+    public <T> T parse(CharSequence text, TemporalQuery<T> query) {
+        return this.getFormatter().parse(text, query);
+    }
+
+    public ZoneId getZone() {
+        return this.getFormatter().getZone();
+    }
+
+    public Locale getLocale() {
+        return this.getFormatter().getLocale();
+    }
+
+    public TemporalAccessor parse(String input) {
+        return formatter.parse(input);
+    }
+
+    public DateTimeFormatter getFormatter() {
+        return formatter;
+    }
+
+    public Format toFormat() {
+        return getFormatter().toFormat();
+    }
+
+    public Object parseObject(String text, ParsePosition pos) {
+        return getFormatter().toFormat().parseObject(text, pos);
+    }
+}
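The point of this wrapper (and the OpenSearchDateTimePrinter interface below) is that a format can now swap in a hand-written parser, as the RFC 3339 formatter later in this patch does, while everything else keeps delegating to java.time. A hypothetical subclass showing the hook; it is not part of this change and would have to live in org.opensearch.common.time since the wrapper is package-private:

import java.text.ParsePosition;
import java.time.format.DateTimeFormatter;

// Hypothetical: demonstrates where a custom implementation overrides parsing.
class TrimmingDateTimeFormatter extends OpenSearchDateTimeFormatter {
    TrimmingDateTimeFormatter(DateTimeFormatter formatter) {
        super(formatter);
    }

    @Override
    public Object parseObject(String text, ParsePosition pos) {
        // Normalize the input before delegating to the wrapped formatter.
        return super.parseObject(text.trim(), pos);
    }
}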
+ */ + +package org.opensearch.common.time; + +import java.time.ZoneId; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +/** + * Interface for DateTimeFormatter{@link java.time.format.DateTimeFormatter} + * to allow for custom implementations for datetime formatting + */ +interface OpenSearchDateTimePrinter { + + public OpenSearchDateTimePrinter withLocale(Locale locale); + + public OpenSearchDateTimePrinter withZone(ZoneId zoneId); + + public String format(TemporalAccessor temporal); + + public Locale getLocale(); + + public ZoneId getZone(); +} diff --git a/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java new file mode 100644 index 0000000000000..98b87efd2380b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Internet Time Utility project (https://github.com/ethlo/itu) under the Apache License, version 2.0. + * Copyright (C) 2017 Morten Haraldsen (ethlo) + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.common.time; + +import java.text.ParsePosition; +import java.time.DateTimeException; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; +import java.util.Arrays; +import java.util.Locale; + +/** + * Defines a close profile of RFC3339 datetime format where the date is mandatory and the time is optional. + * <p> + * The returned formatter can only be used for parsing, printing is unsupported. + * <p> + * This parser can parse zoned datetimes. + * The parser is strict by default, thus time string {@code 24:00} cannot be parsed. + * <p> + * It accepts formats described by the following syntax: + * <pre> + * Year: + * YYYY (eg 1997) + * Year and month: + * YYYY-MM (eg 1997-07) + * Complete date: + * YYYY-MM-DD (eg 1997-07-16) + * Complete date plus hours and minutes: + * YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) + * Complete date plus hours, minutes and seconds: + * YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) + * Complete date plus hours, minutes, seconds and a decimal fraction of a second + * YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) + * YYYY-MM-DDThh:mm:ss,sTZD (eg 1997-07-16T19:20:30,45+01:00) + * where: + * + * YYYY = four-digit year + * MM = two-digit month (01=January, etc.) 
+ * DD = two-digit day of month (01 through 31) + * hh = two digits of hour (00 through 23) (am/pm NOT allowed) + * mm = two digits of minute (00 through 59) + * ss = two digits of second (00 through 59) + * s = one or more(max 9) digits representing a decimal fraction of a second + * TZD = time zone designator (Z or z or +hh:mm or -hh:mm) + * </pre> + */ +final class RFC3339CompatibleDateTimeFormatter extends OpenSearchDateTimeFormatter { + public static final char DATE_SEPARATOR = '-'; + public static final char TIME_SEPARATOR = ':'; + public static final char SEPARATOR_UPPER = 'T'; + private static final char PLUS = '+'; + private static final char MINUS = '-'; + private static final char SEPARATOR_LOWER = 't'; + private static final char SEPARATOR_SPACE = ' '; + private static final char FRACTION_SEPARATOR_1 = '.'; + private static final char FRACTION_SEPARATOR_2 = ','; + private static final char ZULU_UPPER = 'Z'; + private static final char ZULU_LOWER = 'z'; + + private ZoneId zone; + + public RFC3339CompatibleDateTimeFormatter(String pattern) { + super(pattern); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter) { + super(formatter); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter, ZoneId zone) { + super(formatter); + this.zone = zone; + } + + @Override + public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withZone(zoneId), zoneId); + } + + @Override + public OpenSearchDateTimeFormatter withLocale(Locale locale) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withLocale(locale)); + } + + @Override + public Object parseObject(String text, ParsePosition pos) { + try { + return parse(text); + } catch (DateTimeException e) { + return null; + } + } + + @Override + public TemporalAccessor parse(final String dateTime) { + OffsetDateTime parsedDatetime = parse(dateTime, new ParsePosition(0)); + return zone == null ? 
parsedDatetime : parsedDatetime.atZoneSameInstant(zone); + } + + public OffsetDateTime parse(String date, ParsePosition pos) { + if (date == null) { + throw new IllegalArgumentException("date cannot be null"); + } + + final int len = date.length() - pos.getIndex(); + if (len <= 0) { + throw new DateTimeParseException("out of bound parse position", date, pos.getIndex()); + } + final char[] chars = date.substring(pos.getIndex()).toCharArray(); + + // Date portion + + // YEAR + final int years = getYear(chars, pos); + if (4 == len) { + return OffsetDateTime.of(years, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // MONTH + consumeChar(chars, pos, DATE_SEPARATOR); + final int months = getMonth(chars, pos); + if (7 == len) { + return OffsetDateTime.of(years, months, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // DAY + consumeChar(chars, pos, DATE_SEPARATOR); + final int days = getDay(chars, pos); + if (10 == len) { + return OffsetDateTime.of(years, months, days, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // HOURS + consumeChar(chars, pos, SEPARATOR_UPPER, SEPARATOR_LOWER, SEPARATOR_SPACE); + final int hours = getHour(chars, pos); + + // MINUTES + consumeChar(chars, pos, TIME_SEPARATOR); + final int minutes = getMinute(chars, pos); + if (16 == len) { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + // SECONDS or TIMEZONE + return handleTime(chars, pos, years, months, days, hours, minutes); + } + + private static boolean isDigit(char c) { + return (c >= '0' && c <= '9'); + } + + private static int digit(char c) { + return c - '0'; + } + + private static int readInt(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + if (end > strNum.length) { + pos.setErrorIndex(end); + throw new DateTimeParseException("Unexpected end of expression at position " + strNum.length, new String(strNum), end); + } + + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + if (isDigit(c) == false) { + pos.setErrorIndex(i); + throw new DateTimeParseException("Character " + c + " is not a digit", new String(strNum), i); + } + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int readIntUnchecked(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int getHour(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getMinute(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getDay(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static boolean isValidOffset(char[] chars, int offset) { + return offset < chars.length; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char expected) { + int offset = pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + if (chars[offset] != expected) { + throw new DateTimeParseException("Expected character " + expected + " at position " + offset, new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static void consumeNextChar(char[] chars, ParsePosition pos) { + int offset = 
pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static boolean checkPositionContains(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + break; + } + } + return found; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + pos.setIndex(offset + 1); + break; + } + } + if (!found) { + throw new DateTimeParseException( + "Expected character " + Arrays.toString(expected) + " at position " + offset, + new String(chars), + offset + ); + } + } + + private static void assertNoMoreChars(char[] chars, ParsePosition pos) { + if (chars.length > pos.getIndex()) { + throw new DateTimeParseException("Trailing junk data after position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + } + + private static ZoneOffset parseTimezone(char[] chars, ParsePosition pos) { + int offset = pos.getIndex(); + final int left = chars.length - offset; + if (checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + assertNoMoreChars(chars, pos); + return ZoneOffset.UTC; + } + + if (left != 6) { + throw new DateTimeParseException("Invalid timezone offset", new String(chars, offset, left), offset); + } + + final char sign = chars[offset]; + consumeNextChar(chars, pos); + int hours = getHour(chars, pos); + consumeChar(chars, pos, TIME_SEPARATOR); + int minutes = getMinute(chars, pos); + if (sign == MINUS) { + if (hours == 0 && minutes == 0) { + throw new DateTimeParseException("Unknown 'Local Offset Convention' date-time not allowed", new String(chars), offset); + } + hours = -hours; + minutes = -minutes; + } else if (sign != PLUS) { + throw new DateTimeParseException("Invalid character starting at position " + offset, new String(chars), offset); + } + + return ZoneOffset.ofHoursMinutes(hours, minutes); + } + + private static OffsetDateTime handleTime(char[] chars, ParsePosition pos, int year, int month, int day, int hour, int minute) { + switch (chars[pos.getIndex()]) { + case TIME_SEPARATOR: + consumeChar(chars, pos, TIME_SEPARATOR); + return handleSeconds(year, month, day, hour, minute, chars, pos); + + case PLUS: + case MINUS: + case ZULU_UPPER: + case ZULU_LOWER: + final ZoneOffset zoneOffset = parseTimezone(chars, pos); + return OffsetDateTime.of(year, month, day, hour, minute, 0, 0, zoneOffset); + } + throw new DateTimeParseException("Unexpected character " + chars[pos.getIndex()] + " at position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + + private static int getMonth(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getYear(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 4); + } + + private static int getSeconds(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getFractions(final char[] chars, final ParsePosition pos, final int len) { + final int fractions; +
fractions = readIntUnchecked(chars, pos, len); + switch (len) { + case 0: + throw new DateTimeParseException("Must have at least 1 fraction digit", new String(chars), pos.getIndex()); + case 1: + return fractions * 100_000_000; + case 2: + return fractions * 10_000_000; + case 3: + return fractions * 1_000_000; + case 4: + return fractions * 100_000; + case 5: + return fractions * 10_000; + case 6: + return fractions * 1_000; + case 7: + return fractions * 100; + case 8: + return fractions * 10; + default: + return fractions; + } + } + + public static int indexOfNonDigit(final char[] text, int offset) { + for (int i = offset; i < text.length; i++) { + if (isDigit(text[i]) == false) { + return i; + } + } + return -1; + } + + public static void consumeDigits(final char[] text, ParsePosition pos) { + final int idx = indexOfNonDigit(text, pos.getIndex()); + if (idx == -1) { + pos.setErrorIndex(text.length); + } else { + pos.setIndex(idx); + } + } + + private static OffsetDateTime handleSeconds(int year, int month, int day, int hour, int minute, char[] chars, ParsePosition pos) { + // From here the specification is more lenient + final int seconds = getSeconds(chars, pos); + int currPos = pos.getIndex(); + final int remaining = chars.length - currPos; + if (remaining == 0) { + // No offset + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + ZoneOffset offset = null; + int fractions = 0; + if (remaining == 1 && checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + // Do nothing we are done + offset = ZoneOffset.UTC; + assertNoMoreChars(chars, pos); + } else if (remaining >= 1 && checkPositionContains(chars, pos, FRACTION_SEPARATOR_1, FRACTION_SEPARATOR_2)) { + // We have fractional seconds; + consumeNextChar(chars, pos); + ParsePosition initPosition = new ParsePosition(pos.getIndex()); + consumeDigits(chars, pos); + if (pos.getErrorIndex() == -1) { + // We have an end of fractions + final int len = pos.getIndex() - initPosition.getIndex(); + fractions = getFractions(chars, initPosition, len); + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + } else if (remaining >= 1 && checkPositionContains(chars, pos, PLUS, MINUS)) { + // No fractional sections + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("Unexpected character at position " + (pos.getIndex()), new String(chars), pos.getIndex()); + } + + return OffsetDateTime.of(year, month, day, hour, minute, seconds, fractions, offset); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index d4ab161527cc0..8633cf1fe25ea 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,6 +20,11 @@ * @opensearch.internal */ public class FeatureFlags { + /** + * Gates the visibility of the remote store migration support from docrep . + */ + public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled"; + /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. 
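Stepping back to the RFC3339-style parser above: a minimal, self-contained sketch (hypothetical class and method names, not part of this patch) of how the getFractions logic promotes 1 to 9 parsed fraction digits to nanoseconds, by scaling with a power of ten chosen from the digit count:

import java.time.OffsetDateTime;
import java.time.ZoneOffset;

public class FractionScalingSketch {
    // Powers of ten that promote 1..9 fraction digits to nanoseconds,
    // mirroring the switch over `len` in getFractions above.
    private static final int[] NANO_SCALE = { 100_000_000, 10_000_000, 1_000_000, 100_000, 10_000, 1_000, 100, 10, 1 };

    static int toNanos(String fractionDigits) {
        final int len = fractionDigits.length();
        if (len < 1 || len > 9) {
            throw new IllegalArgumentException("expected 1..9 fraction digits");
        }
        return Integer.parseInt(fractionDigits) * NANO_SCALE[len - 1];
    }

    public static void main(String[] args) {
        // ".123" carries millisecond precision: 123 * 1_000_000 nanos
        System.out.println(toNanos("123")); // 123000000
        System.out.println(OffsetDateTime.of(2024, 3, 1, 12, 30, 45, toNanos("123"), ZoneOffset.UTC));
    }
}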
@@ -38,12 +43,6 @@ public class FeatureFlags { */ public static final String IDENTITY = "opensearch.experimental.feature.identity.enabled"; - /** - * Gates the functionality of concurrently searching the segments - * Once the feature is ready for release, this feature flag can be removed. - */ - public static final String CONCURRENT_SEGMENT_SEARCH = "opensearch.experimental.feature.concurrent_segment_search.enabled"; - /** * Gates the functionality of telemetry framework. */ @@ -60,6 +59,12 @@ public class FeatureFlags { */ public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; + /** + * Gates the functionality of pluggable cache. + * Enables OpenSearch to use pluggable caches with respective store names via setting. + */ + public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; + /** * Should store the settings from opensearch.yml. */ @@ -99,18 +104,18 @@ public static boolean isEnabled(Setting<Boolean> featureFlag) { } } + public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + public static final Setting<Boolean> EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting<Boolean> IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); public static final Setting<Boolean> TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - public static final Setting<Boolean> CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( - CONCURRENT_SEGMENT_SEARCH, - false, - Property.NodeScope - ); - public static final Setting<Boolean> DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( DATETIME_FORMATTER_CACHING, true, @@ -122,4 +127,6 @@ public static boolean isEnabled(Setting<Boolean> featureFlag) { false, Property.NodeScope ); + + public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); } diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 86e7227cb6c85..fe053a26329e4 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -118,10 +118,17 @@ public ReorganizingLongHash(final long initialCapacity, final float loadFactor, mask = capacity - 1; grow = (long) (capacity * loadFactor); size = 0; - - table = bigArrays.newLongArray(capacity, false); - table.fill(0, capacity, -1); // -1 represents an empty slot - keys = bigArrays.newLongArray(initialCapacity, false); + try { + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); // -1 represents an empty slot + keys = bigArrays.newLongArray(initialCapacity, false); + } finally { + if (table == null || keys == null) { + // it's important to close the arrays initialized above to prevent memory leak + // refer: https://github.com/opensearch-project/OpenSearch/issues/10154 + Releasables.closeWhileHandlingException(table, keys); + } + } } /** diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index ec1024bbe5f30..6e45c3fb7b58d 100644 --- 
a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -393,6 +393,7 @@ static class OpenSearchThreadFactory implements ThreadFactory { final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + @SuppressWarnings("removal") OpenSearchThreadFactory(String namePrefix) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); @@ -446,6 +447,30 @@ public boolean offer(E e) { } } + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public void put(E e) { + super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + return super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public boolean add(E e) { + return super.offer(e); + } + } /** diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index c7ba5eb040a1f..2748938d8b761 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -199,6 +199,8 @@ public String toString() { private final NodeMetadata nodeMetadata; + private final IndexStoreListener indexStoreListener; + /** * Maximum number of data nodes that should run in an environment. */ @@ -295,18 +297,23 @@ public void close() { } } + public NodeEnvironment(Settings settings, Environment environment) throws IOException { + this(settings, environment, IndexStoreListener.EMPTY); + } + /** * Setup the environment. * @param settings settings from opensearch.yml */ - public NodeEnvironment(Settings settings, Environment environment) throws IOException { - if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) { + public NodeEnvironment(Settings settings, Environment environment, IndexStoreListener indexStoreListener) throws IOException { + if (DiscoveryNode.nodeRequiresLocalStorage(settings) == false) { nodePaths = null; fileCacheNodePath = null; sharedDataPath = null; locks = null; nodeLockId = -1; nodeMetadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT); + this.indexStoreListener = IndexStoreListener.EMPTY; return; } boolean success = false; @@ -385,6 +392,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } this.nodeMetadata = loadNodeMetadata(settings, logger, nodePaths); + this.indexStoreListener = indexStoreListener; success = true; } finally { if (success == false) { @@ -577,6 +585,9 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... 
sh public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings) throws IOException { final ShardId shardId = lock.getShardId(); assert isShardLocked(shardId) : "shard " + shardId + " is not locked"; + + indexStoreListener.beforeShardPathDeleted(shardId, indexSettings, this); + final Path[] paths = availableShardPaths(shardId); logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths); acquireFSLockForPaths(indexSettings, paths); @@ -653,6 +664,8 @@ public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSetti * @param indexSettings settings for the index being deleted */ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException { + indexStoreListener.beforeIndexPathDeleted(index, indexSettings, this); + final Path[] indexPaths = indexPaths(index); logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths); IOUtils.rm(indexPaths); @@ -663,6 +676,18 @@ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettin } } + private void deleteIndexFileCacheDirectory(Index index) { + final Path indexCachePath = fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + logger.trace("deleting index {} file cache directory, path: [{}]", index, indexCachePath); + if (Files.exists(indexCachePath)) { + try { + IOUtils.rm(indexCachePath); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + } + } + } + /** * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired * a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are released. @@ -1387,4 +1412,18 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + + /** + * A listener that is executed on per-index and per-shard store events, like deleting shard path + * + * @opensearch.internal + */ + public interface IndexStoreListener { + default void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {} + + default void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {} + + IndexStoreListener EMPTY = new IndexStoreListener() { + }; + } } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java index 50774f7e0cb1c..3d129d4794a10 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java @@ -32,9 +32,6 @@ package org.opensearch.gateway; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -43,21 +40,22 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.logging.Loggers; import org.opensearch.core.action.ActionListener; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.store.ShardAttributes; -import 
org.opensearch.transport.ReceiveTimeoutTransportException; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; +import reactor.util.annotation.NonNull; + import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -65,11 +63,9 @@ * Allows to asynchronously fetch shard related data from other nodes for allocation, without blocking * the cluster update thread. * <p> - * The async fetch logic maintains a map of which nodes are being fetched from in an async manner, - * and once the results are back, it makes sure to schedule a reroute to make sure those results will - * be taken into account. + * The async fetch logic maintains a cache {@link AsyncShardFetchCache} which is filled in async manner when nodes respond back. + * It also schedules a reroute to make sure those results will be taken into account. * - * It comes in two modes, to single fetch a shard or fetch a batch of shards. * @opensearch.internal */ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Releasable { @@ -86,14 +82,12 @@ public interface Lister<NodesResponse extends BaseNodesResponse<NodeResponse>, N protected final String type; protected final Map<ShardId, ShardAttributes> shardAttributesMap; private final Lister<BaseNodesResponse<T>, T> action; - private final Map<String, NodeEntry<T>> cache = new HashMap<>(); + private final AsyncShardFetchCache<T> cache; private final AtomicLong round = new AtomicLong(); private boolean closed; private final String reroutingKey; private final Map<ShardId, Set<String>> shardToIgnoreNodes = new HashMap<>(); - private final boolean enableBatchMode; - @SuppressWarnings("unchecked") protected AsyncShardFetch( Logger logger, @@ -108,17 +102,17 @@ protected AsyncShardFetch( shardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); this.action = (Lister<BaseNodesResponse<T>, T>) action; this.reroutingKey = "ShardId=[" + shardId.toString() + "]"; - enableBatchMode = false; + cache = new ShardCache<>(logger, reroutingKey, type); } /** * Added to fetch a batch of shards from nodes * - * @param logger Logger - * @param type type of action + * @param logger Logger + * @param type type of action * @param shardAttributesMap Map of {@link ShardId} to {@link ShardAttributes} to perform fetching on them a - * @param action Transport Action - * @param batchId For the given ShardAttributesMap, we expect them to tie with a single batch id for logging and later identification + * @param action Transport Action + * @param batchId For the given ShardAttributesMap, we expect them to tie with a single batch id for logging and later identification */ @SuppressWarnings("unchecked") protected AsyncShardFetch( @@ -133,7 +127,7 @@ protected AsyncShardFetch( this.shardAttributesMap = shardAttributesMap; this.action = (Lister<BaseNodesResponse<T>, T>) action; this.reroutingKey = "BatchID=[" + batchId + "]"; - enableBatchMode = true; + cache = new ShardCache<>(logger, reroutingKey, type); } @Override @@ -141,19 +135,6 @@ public synchronized void close() { this.closed = true; } - /** - * Returns the number of async fetches that are currently ongoing. 
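For orientation, a condensed sketch of the round-based flow that fetchData now delegates to the cache. Names, types, and synchronization are simplified; Cache here is a hypothetical stand-in for AsyncShardFetchCache, not the real interface:

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class FetchRoundSketch {
    interface Cache {
        void fillWithDataNodes(List<String> dataNodes); // add new data nodes, drop departed ones
        List<String> findNodesToFetch();                // nodes with no data that are not already fetching
        void markAsFetching(List<String> nodeIds, long round);
        boolean hasAnyNodeFetching();
    }

    private final AtomicLong round = new AtomicLong();
    private final Cache cache;

    FetchRoundSketch(Cache cache) {
        this.cache = cache;
    }

    /** Returns true while any node is still being fetched from (the caller gets no data yet). */
    boolean startRoundIfNeeded(List<String> dataNodes) {
        cache.fillWithDataNodes(dataNodes);
        List<String> nodeIds = cache.findNodesToFetch();
        if (nodeIds.isEmpty() == false) {
            // a unique round id lets processAsyncFetch detect stale responses later
            long fetchingRound = round.incrementAndGet();
            cache.markAsFetching(nodeIds, fetchingRound);
            // asyncFetch(nodeIds, fetchingRound) would be kicked off here
        }
        return cache.hasAnyNodeFetching();
    }
}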
- */ - public synchronized int getNumberOfInFlightFetches() { - int count = 0; - for (NodeEntry<T> nodeEntry : cache.values()) { - if (nodeEntry.isFetching()) { - count++; - } - } - return count; - } - /** * Fetches the data for the relevant shard. If there any ongoing async fetches going on, or new ones have * been initiated by this call, the result will have no data. @@ -166,7 +147,7 @@ public synchronized FetchResult<T> fetchData(DiscoveryNodes nodes, Map<ShardId, throw new IllegalStateException(reroutingKey + ": can't fetch data on closed async fetch"); } - if (enableBatchMode == false) { + if (shardAttributesMap.size() == 1) { // we will do assertions here on ignoreNodes if (ignoreNodes.size() > 1) { throw new IllegalStateException( @@ -187,48 +168,24 @@ public synchronized FetchResult<T> fetchData(DiscoveryNodes nodes, Map<ShardId, shardToIgnoreNodes.put(ignoreNodesEntry.getKey(), ignoreNodesSet); } - fillShardCacheWithDataNodes(cache, nodes); - List<NodeEntry<T>> nodesToFetch = findNodesToFetch(cache); - if (nodesToFetch.isEmpty() == false) { + cache.fillShardCacheWithDataNodes(nodes); + List<String> nodeIds = cache.findNodesToFetch(); + if (nodeIds.isEmpty() == false) { // mark all node as fetching and go ahead and async fetch them // use a unique round id to detect stale responses in processAsyncFetch final long fetchingRound = round.incrementAndGet(); - for (NodeEntry<T> nodeEntry : nodesToFetch) { - nodeEntry.markAsFetching(fetchingRound); - } - DiscoveryNode[] discoNodesToFetch = nodesToFetch.stream() - .map(NodeEntry::getNodeId) - .map(nodes::get) - .toArray(DiscoveryNode[]::new); + cache.markAsFetching(nodeIds, fetchingRound); + DiscoveryNode[] discoNodesToFetch = nodeIds.stream().map(nodes::get).toArray(DiscoveryNode[]::new); asyncFetch(discoNodesToFetch, fetchingRound); } // if we are still fetching, return null to indicate it - if (hasAnyNodeFetching(cache)) { + if (cache.hasAnyNodeFetching()) { return new FetchResult<>(null, emptyMap()); } else { // nothing to fetch, yay, build the return value - Map<DiscoveryNode, T> fetchData = new HashMap<>(); Set<String> failedNodes = new HashSet<>(); - for (Iterator<Map.Entry<String, NodeEntry<T>>> it = cache.entrySet().iterator(); it.hasNext();) { - Map.Entry<String, NodeEntry<T>> entry = it.next(); - String nodeId = entry.getKey(); - NodeEntry<T> nodeEntry = entry.getValue(); - - DiscoveryNode node = nodes.get(nodeId); - if (node != null) { - if (nodeEntry.isFailed()) { - // if its failed, remove it from the list of nodes, so if this run doesn't work - // we try again next round to fetch it again - it.remove(); - failedNodes.add(nodeEntry.getNodeId()); - } else { - if (nodeEntry.getValue() != null) { - fetchData.put(node, nodeEntry.getValue()); - } - } - } - } + Map<DiscoveryNode, T> fetchData = cache.getCacheData(nodes, failedNodes); Map<ShardId, Set<String>> allIgnoreNodesMap = unmodifiableMap(new HashMap<>(shardToIgnoreNodes)); // clear the nodes to ignore, we had a successful run in fetching everything we can @@ -268,77 +225,18 @@ protected synchronized void processAsyncFetch(List<T> responses, List<FailedNode logger.trace("{} processing fetched [{}] results", reroutingKey, type); if (responses != null) { - for (T response : responses) { - NodeEntry<T> nodeEntry = cache.get(response.getNode().getId()); - if (nodeEntry != null) { - if (nodeEntry.getFetchingRound() != fetchingRound) { - assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; - logger.trace( - "{} received response for 
[{}] from node {} for an older fetching round (expected: {} but was: {})", - reroutingKey, - nodeEntry.getNodeId(), - type, - nodeEntry.getFetchingRound(), - fetchingRound - ); - } else if (nodeEntry.isFailed()) { - logger.trace( - "{} node {} has failed for [{}] (failure [{}])", - reroutingKey, - nodeEntry.getNodeId(), - type, - nodeEntry.getFailure() - ); - } else { - // if the entry is there, for the right fetching round and not marked as failed already, process it - logger.trace("{} marking {} as done for [{}], result is [{}]", reroutingKey, nodeEntry.getNodeId(), type, response); - nodeEntry.doneFetching(response); - } - } - } + cache.processResponses(responses, fetchingRound); } if (failures != null) { - for (FailedNodeException failure : failures) { - logger.trace("{} processing failure {} for [{}]", reroutingKey, failure, type); - NodeEntry<T> nodeEntry = cache.get(failure.nodeId()); - if (nodeEntry != null) { - if (nodeEntry.getFetchingRound() != fetchingRound) { - assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; - logger.trace( - "{} received failure for [{}] from node {} for an older fetching round (expected: {} but was: {})", - reroutingKey, - nodeEntry.getNodeId(), - type, - nodeEntry.getFetchingRound(), - fetchingRound - ); - } else if (nodeEntry.isFailed() == false) { - // if the entry is there, for the right fetching round and not marked as failed already, process it - Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause()); - // if the request got rejected or timed out, we need to try it again next time... - if (unwrappedCause instanceof OpenSearchRejectedExecutionException - || unwrappedCause instanceof ReceiveTimeoutTransportException - || unwrappedCause instanceof OpenSearchTimeoutException) { - nodeEntry.restartFetching(); - } else { - logger.warn( - () -> new ParameterizedMessage( - "{}: failed to list shard for {} on node [{}]", - reroutingKey, - type, - failure.nodeId() - ), - failure - ); - nodeEntry.doneFetching(failure.getCause()); - } - } - } - } + cache.processFailures(failures, fetchingRound); } reroute(reroutingKey, "post_response"); } + public synchronized int getNumberOfInFlightFetches() { + return cache.getInflightFetches(); + } + /** * Implement this in order to scheduled another round that causes a call to fetch data. */ @@ -351,47 +249,6 @@ synchronized void clearCacheForNode(String nodeId) { cache.remove(nodeId); } - /** - * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from - * it nodes that are no longer part of the state. - */ - private void fillShardCacheWithDataNodes(Map<String, NodeEntry<T>> shardCache, DiscoveryNodes nodes) { - // verify that all current data nodes are there - for (final DiscoveryNode node : nodes.getDataNodes().values()) { - if (shardCache.containsKey(node.getId()) == false) { - shardCache.put(node.getId(), new NodeEntry<T>(node.getId())); - } - } - // remove nodes that are not longer part of the data nodes set - shardCache.keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); - } - - /** - * Finds all the nodes that need to be fetched. Those are nodes that have no - * data, and are not in fetch mode. 
- */ - private List<NodeEntry<T>> findNodesToFetch(Map<String, NodeEntry<T>> shardCache) { - List<NodeEntry<T>> nodesToFetch = new ArrayList<>(); - for (NodeEntry<T> nodeEntry : shardCache.values()) { - if (nodeEntry.hasData() == false && nodeEntry.isFetching() == false) { - nodesToFetch.add(nodeEntry); - } - } - return nodesToFetch; - } - - /** - * Are there any nodes that are fetching data? - */ - private boolean hasAnyNodeFetching(Map<String, NodeEntry<T>> shardCache) { - for (NodeEntry<T> nodeEntry : shardCache.values()) { - if (nodeEntry.isFetching()) { - return true; - } - } - return false; - } - /** * Async fetches data for the provided shard with the set of nodes that need to be fetched from. */ @@ -415,6 +272,72 @@ public void onFailure(Exception e) { }); } + /** + * Cache implementation of transport actions returning single shard related data in the response. + * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShards} or + * {@link TransportNodesListShardStoreMetadata}. + * + * @param <K> Response type of transport action. + */ + static class ShardCache<K extends BaseNodeResponse> extends AsyncShardFetchCache<K> { + + private final Map<String, NodeEntry<K>> cache; + + public ShardCache(Logger logger, String logKey, String type) { + super(Loggers.getLogger(logger, "_" + logKey), type); + cache = new HashMap<>(); + } + + @Override + public void initData(DiscoveryNode node) { + cache.put(node.getId(), new NodeEntry<>(node.getId())); + } + + @Override + public void putData(DiscoveryNode node, K response) { + cache.get(node.getId()).doneFetching(response); + } + + @Override + public K getData(DiscoveryNode node) { + return cache.get(node.getId()).getValue(); + } + + @NonNull + @Override + public Map<String, ? extends BaseNodeEntry> getCache() { + return cache; + } + + @Override + public void deleteShard(ShardId shardId) { + cache.clear(); // single shard cache can clear the full map + } + + /** + * A node entry, holding the state of the fetched data for a specific shard + * for a giving node. + */ + static class NodeEntry<U extends BaseNodeResponse> extends AsyncShardFetchCache.BaseNodeEntry { + @Nullable + private U value; + + void doneFetching(U value) { + super.doneFetching(); + this.value = value; + } + + NodeEntry(String nodeId) { + super(nodeId); + } + + U getValue() { + return value; + } + + } + } + /** * The result of a fetch operation. Make sure to first check {@link #hasData()} before * fetching the actual data. @@ -460,83 +383,4 @@ public void processAllocation(RoutingAllocation allocation) { } } - - /** - * A node entry, holding the state of the fetched data for a specific shard - * for a giving node. 
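The per-node bookkeeping these entries share can be pictured as a small state machine. Below is a stand-alone, illustrative rendering (hypothetical class name; the real BaseNodeEntry expresses the same transitions through boolean fields and assertions):

class NodeFetchState {
    enum State { IDLE, FETCHING, HAS_VALUE, FAILED }

    private State state = State.IDLE;
    private long fetchingRound;

    void markAsFetching(long round) {
        if (state == State.FETCHING) throw new IllegalStateException("double marking a node as fetching");
        state = State.FETCHING;
        fetchingRound = round;
    }

    void doneFetching(boolean failed) {
        if (state != State.FETCHING) throw new IllegalStateException("not in fetching mode");
        state = failed ? State.FAILED : State.HAS_VALUE;
    }

    // a rejected or timed-out request goes back to IDLE so the next round retries it
    void restartFetching() {
        if (state != State.FETCHING) throw new IllegalStateException("not in fetching mode");
        state = State.IDLE;
    }

    boolean hasData() { return state == State.HAS_VALUE || state == State.FAILED; }

    long getFetchingRound() { return fetchingRound; }
}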
- */ - static class NodeEntry<T> { - private final String nodeId; - private boolean fetching; - @Nullable - private T value; - private boolean valueSet; - private Throwable failure; - private long fetchingRound; - - NodeEntry(String nodeId) { - this.nodeId = nodeId; - } - - String getNodeId() { - return this.nodeId; - } - - boolean isFetching() { - return fetching; - } - - void markAsFetching(long fetchingRound) { - assert fetching == false : "double marking a node as fetching"; - this.fetching = true; - this.fetchingRound = fetchingRound; - } - - void doneFetching(T value) { - assert fetching : "setting value but not in fetching mode"; - assert failure == null : "setting value when failure already set"; - this.valueSet = true; - this.value = value; - this.fetching = false; - } - - void doneFetching(Throwable failure) { - assert fetching : "setting value but not in fetching mode"; - assert valueSet == false : "setting failure when already set value"; - assert failure != null : "setting failure can't be null"; - this.failure = failure; - this.fetching = false; - } - - void restartFetching() { - assert fetching : "restarting fetching, but not in fetching mode"; - assert valueSet == false : "value can't be set when restarting fetching"; - assert failure == null : "failure can't be set when restarting fetching"; - this.fetching = false; - } - - boolean isFailed() { - return failure != null; - } - - boolean hasData() { - return valueSet || failure != null; - } - - Throwable getFailure() { - assert hasData() : "getting failure when data has not been fetched"; - return failure; - } - - @Nullable - T getValue() { - assert failure == null : "trying to fetch value, but its marked as failed, check isFailed"; - assert valueSet : "value is not set, hasn't been fetched yet"; - return value; - } - - long getFetchingRound() { - return fetchingRound; - } - } } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java new file mode 100644 index 0000000000000..3140ceef4f3ee --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java @@ -0,0 +1,316 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.transport.ReceiveTimeoutTransportException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import reactor.util.annotation.NonNull; + +/** + * AsyncShardFetchCache operates on a node-level cache, which is a map from String (node id) to BaseNodeEntry. initData, + * putData and getData need to be called for all the nodes. This class is responsible for managing the flow for all + * the nodes. + * It also provides useful insights, such as how many fetches are ongoing and which nodes still need to be fetched + * from, and it can mark nodes as being in fetching mode. All of these operations check the cache state and respond + * accordingly. + * <p> + * initData : how to initialize an entry of shard cache for a node. + * putData : how to store the response of transport action in the cache. + * getData : how to get the stored data for any shard allocators like {@link PrimaryShardAllocator} or + * {@link ReplicaShardAllocator} + * deleteShard : how to clean up the stored data from cache for a shard. + * + * @param <K> Response type of transport action which has the data to be stored in the cache. + * + * @opensearch.internal + */ +public abstract class AsyncShardFetchCache<K extends BaseNodeResponse> { + private final Logger logger; + private final String type; + + protected AsyncShardFetchCache(Logger logger, String type) { + this.logger = logger; + this.type = type; + } + + abstract void initData(DiscoveryNode node); + + abstract void putData(DiscoveryNode node, K response); + + abstract K getData(DiscoveryNode node); + + @NonNull + abstract Map<String, ? extends BaseNodeEntry> getCache(); + + /** + * Cleanup cached data for this shard once it's started. Cleanup only happens at shard level. Node entries will + * automatically be cleaned up once shards are assigned. + * + * @param shardId for which we need to free up the cached data. + */ + abstract void deleteShard(ShardId shardId); + + /** + * Returns the number of fetches that are currently ongoing. + */ + int getInflightFetches() { + int count = 0; + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.isFetching()) { + count++; + } + } + return count; + } + + /** + * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from + * it nodes that are no longer part of the state. + */ + void fillShardCacheWithDataNodes(DiscoveryNodes nodes) { + // verify that all current data nodes are there + for (final DiscoveryNode node : nodes.getDataNodes().values()) { + if (getCache().containsKey(node.getId()) == false) { + initData(node); + } + } + // remove nodes that are no longer part of the data nodes set + getCache().keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); + } + + /** + * Finds all the nodes that need to be fetched. Those are nodes that have no + * data, and are not in fetch mode. + */ + List<String> findNodesToFetch() { + List<String> nodesToFetch = new ArrayList<>(); + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.hasData() == false && nodeEntry.isFetching() == false) { + nodesToFetch.add(nodeEntry.getNodeId()); + } + } + return nodesToFetch; + } + + /** + * Are there any nodes that are fetching data? + */ + boolean hasAnyNodeFetching() { + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.isFetching()) { + return true; + } + } + return false; + } + + /** + * Get the data from the cache, ignoring failed entries. Uses the getData method to read each entry, as + * different implementations may have different ways to populate the data from the cache. + * + * @param nodes Discovery nodes for which we need to return the cache data. + * @param failedNodes populated with the nodes where the fetch has failed. + * @return Map of cache data for every DiscoveryNode. + */ + Map<DiscoveryNode, K> getCacheData(DiscoveryNodes nodes, Set<String> failedNodes) { + Map<DiscoveryNode, K> fetchData = new HashMap<>(); + for (Iterator<?
extends Map.Entry<String, ? extends BaseNodeEntry>> it = getCache().entrySet().iterator(); it.hasNext();) { + Map.Entry<String, BaseNodeEntry> entry = (Map.Entry<String, BaseNodeEntry>) it.next(); + String nodeId = entry.getKey(); + BaseNodeEntry nodeEntry = entry.getValue(); + + DiscoveryNode node = nodes.get(nodeId); + if (node != null) { + if (nodeEntry.isFailed()) { + // if its failed, remove it from the list of nodes, so if this run doesn't work + // we try again next round to fetch it again + it.remove(); + failedNodes.add(nodeEntry.getNodeId()); + } else { + K nodeResponse = getData(node); + if (nodeResponse != null) { + fetchData.put(node, nodeResponse); + } + } + } + } + return fetchData; + } + + void processResponses(List<K> responses, long fetchingRound) { + for (K response : responses) { + BaseNodeEntry nodeEntry = getCache().get(response.getNode().getId()); + if (nodeEntry != null) { + if (validateNodeResponse(nodeEntry, fetchingRound)) { + // if the entry is there, for the right fetching round and not marked as failed already, process it + logger.trace("marking {} as done for [{}], result is [{}]", nodeEntry.getNodeId(), type, response); + putData(response.getNode(), response); + } + } + } + } + + private boolean validateNodeResponse(BaseNodeEntry nodeEntry, long fetchingRound) { + if (nodeEntry.getFetchingRound() != fetchingRound) { + assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; + logger.trace( + "received response for [{}] from node {} for an older fetching round (expected: {} but was: {})", + nodeEntry.getNodeId(), + type, + nodeEntry.getFetchingRound(), + fetchingRound + ); + return false; + } else if (nodeEntry.isFailed()) { + logger.trace("node {} has failed for [{}] (failure [{}])", nodeEntry.getNodeId(), type, nodeEntry.getFailure()); + return false; + } + return true; + } + + private void handleNodeFailure(BaseNodeEntry nodeEntry, FailedNodeException failure, long fetchingRound) { + if (nodeEntry.getFetchingRound() != fetchingRound) { + assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; + logger.trace( + "received failure for [{}] from node {} for an older fetching round (expected: {} but was: {})", + nodeEntry.getNodeId(), + type, + nodeEntry.getFetchingRound(), + fetchingRound + ); + } else if (nodeEntry.isFailed() == false) { + // if the entry is there, for the right fetching round and not marked as failed already, process it + Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause()); + // if the request got rejected or timed out, we need to try it again next time... 
+ if (retryableException(unwrappedCause)) { + nodeEntry.restartFetching(); + } else { + logger.warn(() -> new ParameterizedMessage("failed to list shard for {} on node [{}]", type, failure.nodeId()), failure); + nodeEntry.doneFetching(failure.getCause()); + } + } + } + + boolean retryableException(Throwable unwrappedCause) { + return unwrappedCause instanceof OpenSearchRejectedExecutionException + || unwrappedCause instanceof ReceiveTimeoutTransportException + || unwrappedCause instanceof OpenSearchTimeoutException; + } + + void processFailures(List<FailedNodeException> failures, long fetchingRound) { + for (FailedNodeException failure : failures) { + logger.trace("processing failure {} for [{}]", failure, type); + BaseNodeEntry nodeEntry = getCache().get(failure.nodeId()); + if (nodeEntry != null) { + handleNodeFailure(nodeEntry, failure, fetchingRound); + } + } + } + + /** + * Common function for removing whole node entry. + * + * @param nodeId nodeId to be cleaned. + */ + void remove(String nodeId) { + this.getCache().remove(nodeId); + } + + void markAsFetching(List<String> nodeIds, long fetchingRound) { + for (String nodeId : nodeIds) { + getCache().get(nodeId).markAsFetching(fetchingRound); + } + } + + /** + * A node entry, holding only node level fetching related information. + * Actual metadata of shard is stored in child classes. + */ + static class BaseNodeEntry { + private final String nodeId; + private boolean fetching; + private boolean valueSet; + private Throwable failure; + private long fetchingRound; + + BaseNodeEntry(String nodeId) { + this.nodeId = nodeId; + } + + String getNodeId() { + return this.nodeId; + } + + boolean isFetching() { + return fetching; + } + + void markAsFetching(long fetchingRound) { + assert fetching == false : "double marking a node as fetching"; + this.fetching = true; + this.fetchingRound = fetchingRound; + } + + void doneFetching() { + assert fetching : "setting value but not in fetching mode"; + assert failure == null : "setting value when failure already set"; + this.valueSet = true; + this.fetching = false; + } + + void doneFetching(Throwable failure) { + assert fetching : "setting value but not in fetching mode"; + assert valueSet == false : "setting failure when already set value"; + assert failure != null : "setting failure can't be null"; + this.failure = failure; + this.fetching = false; + } + + void restartFetching() { + assert fetching : "restarting fetching, but not in fetching mode"; + assert valueSet == false : "value can't be set when restarting fetching"; + assert failure == null : "failure can't be set when restarting fetching"; + this.fetching = false; + } + + boolean isFailed() { + return failure != null; + } + + boolean hasData() { + return valueSet || failure != null; + } + + Throwable getFailure() { + assert hasData() : "getting failure when data has not been fetched"; + return failure; + } + + long getFetchingRound() { + return fetchingRound; + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 853fe03904c53..e0831293fc7e1 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; +import 
org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; @@ -45,7 +46,9 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.stream.Collectors; /** * An abstract class that implements basic functionality for allocating @@ -64,8 +67,9 @@ public abstract class BaseGatewayShardAllocator { * Allocate an unassigned shard to nodes (if any) where valid copies of the shard already exist. * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} * to make decisions on assigning shards to nodes. - * @param shardRouting the shard to allocate - * @param allocation the allocation state container object + * + * @param shardRouting the shard to allocate + * @param allocation the allocation state container object * @param unassignedAllocationHandler handles the allocation of the current shard */ public void allocateUnassigned( @@ -74,7 +78,46 @@ public void allocateUnassigned( ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler ) { final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shardRouting, allocation, logger); + executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); + } + + /** + * Allocate a batch of unassigned shards to nodes where valid copies of the shards already exist. + * @param shardRoutings the shards to allocate + * @param allocation the allocation state container object + */ + public void allocateUnassignedBatch(List<ShardRouting> shardRoutings, RoutingAllocation allocation) { + // make Allocation Decisions for all shards + HashMap<ShardRouting, AllocateUnassignedDecision> decisionMap = makeAllocationDecision(shardRoutings, allocation, logger); + assert shardRoutings.size() == decisionMap.size() : "make allocation decision didn't return allocation decision for " + + "some shards"; + // get all unassigned shards iterator + RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + + while (iterator.hasNext()) { + ShardRouting shard = iterator.next(); + try { + if (decisionMap.isEmpty() == false) { + if (decisionMap.containsKey(shard)) { + executeDecision(shard, decisionMap.remove(shard), allocation, iterator); + } + } else { + // no need to keep iterating the unassigned shards, if we don't have anything in decision map + break; + } + } catch (Exception e) { + logger.error("Failed to execute decision for shard {} while initializing", shard, e); + throw e; + } + } + } + private void executeDecision( + ShardRouting shardRouting, + AllocateUnassignedDecision allocateUnassignedDecision, + RoutingAllocation allocation, + ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler + ) { if (allocateUnassignedDecision.isDecisionTaken() == false) { // no decision was taken by this allocator return; @@ -109,9 +152,9 @@ protected long getExpectedShardSize(ShardRouting shardRouting, RoutingAllocation * {@link #allocateUnassigned(ShardRouting, RoutingAllocation, ExistingShardsAllocator.UnassignedAllocationHandler)} to make decisions * about whether or not the shard can be allocated by this allocator and if so, to which node it will be allocated.
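The two-phase shape of allocateUnassignedBatch above (decide for every shard up front, then drain the unassigned iterator and execute each decision as its shard comes up) can be shown in miniature. A hedged sketch with hypothetical types, not the OpenSearch classes:

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class BatchDecisionSketch {
    // Phase 1: one decision per shard, computed before any shard is touched.
    // Phase 2: a single pass over the unassigned iterator, removing each decision
    // as it is executed so the loop can stop early once the map is empty.
    static void allocateBatch(List<String> shardIds, Iterator<String> unassigned, Function<String, String> decide) {
        Map<String, String> decisions = new HashMap<>();
        for (String shardId : shardIds) {
            decisions.put(shardId, decide.apply(shardId));
        }
        while (unassigned.hasNext() && decisions.isEmpty() == false) {
            String shardId = unassigned.next();
            String decision = decisions.remove(shardId);
            if (decision != null) {
                System.out.println(shardId + " -> " + decision); // executeDecision(...) in the real code
            }
        }
    }
}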
* - * @param unassignedShard the unassigned shard to allocate - * @param allocation the current routing state - * @param logger the logger + * @param unassignedShard the unassigned shard to allocate + * @param allocation the current routing state + * @param logger the logger * @return an {@link AllocateUnassignedDecision} with the final decision of whether to allocate and details of the decision */ public abstract AllocateUnassignedDecision makeAllocationDecision( @@ -120,6 +163,21 @@ public abstract AllocateUnassignedDecision makeAllocationDecision( Logger logger ); + public HashMap<ShardRouting, AllocateUnassignedDecision> makeAllocationDecision( + List<ShardRouting> unassignedShardBatch, + RoutingAllocation allocation, + Logger logger + ) { + + return (HashMap<ShardRouting, AllocateUnassignedDecision>) unassignedShardBatch.stream() + .collect( + Collectors.toMap( + unassignedShard -> unassignedShard, + unassignedShard -> makeAllocationDecision(unassignedShard, allocation, logger) + ) + ); + } + /** * Builds decisions for all nodes in the cluster, so that the explain API can provide information on * allocation decisions for each node, while still waiting to allocate the shard (e.g. due to fetching shard data). diff --git a/server/src/main/java/org/opensearch/gateway/GatewayModule.java b/server/src/main/java/org/opensearch/gateway/GatewayModule.java index 59ec0243c88c9..847ba01737332 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayModule.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayModule.java @@ -47,6 +47,7 @@ protected void configure() { bind(GatewayService.class).asEagerSingleton(); bind(TransportNodesListGatewayMetaState.class).asEagerSingleton(); bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton(); + bind(TransportNodesListGatewayStartedShardsBatch.class).asEagerSingleton(); bind(LocalAllocateDangledIndices.class).asEagerSingleton(); } } diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 2807be00feeaa..f41545cbdf9bf 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -50,6 +50,7 @@ import org.opensearch.cluster.routing.allocation.decider.Decision.Type; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.gateway.AsyncShardFetch.FetchResult; +import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.NodeGatewayStartedShard; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import java.util.ArrayList; @@ -81,7 +82,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? 
*/ - private static boolean isResponsibleFor(final ShardRouting shard) { + protected static boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery @@ -89,19 +90,20 @@ private static boolean isResponsibleFor(final ShardRouting shard) { || shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT); } - @Override - public AllocateUnassignedDecision makeAllocationDecision( - final ShardRouting unassignedShard, - final RoutingAllocation allocation, - final Logger logger - ) { + /** + * Skip doing fetchData call for a shard if recovery mode is snapshot. Also do not take decision if allocator is + * not responsible for this particular shard. + * + * @param unassignedShard unassigned shard routing + * @param allocation routing allocation object + * @return allocation decision taken for this shard + */ + protected AllocateUnassignedDecision getInEligibleShardDecision(ShardRouting unassignedShard, RoutingAllocation allocation) { if (isResponsibleFor(unassignedShard) == false) { // this allocator is not responsible for allocating this shard return AllocateUnassignedDecision.NOT_TAKEN; } - final boolean explain = allocation.debugDecision(); - if (unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT && allocation.snapshotShardSizeInfo().getShardSize(unassignedShard) == null) { List<NodeAllocationResult> nodeDecisions = null; @@ -110,9 +112,55 @@ public AllocateUnassignedDecision makeAllocationDecision( } return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } + return null; + } + @Override + public AllocateUnassignedDecision makeAllocationDecision( + final ShardRouting unassignedShard, + final RoutingAllocation allocation, + final Logger logger + ) { + AllocateUnassignedDecision decision = getInEligibleShardDecision(unassignedShard, allocation); + if (decision != null) { + return decision; + } final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation); - if (shardState.hasData() == false) { + List<NodeGatewayStartedShard> nodeShardStates = adaptToNodeStartedShardList(shardState); + return getAllocationDecision(unassignedShard, allocation, nodeShardStates, logger); + } + + /** + * Transforms {@link FetchResult} of {@link NodeGatewayStartedShards} to {@link List} of {@link NodeGatewayStartedShard} + * Returns null if {@link FetchResult} does not have any data. 
+ */ + private static List<NodeGatewayStartedShard> adaptToNodeStartedShardList(FetchResult<NodeGatewayStartedShards> shardsState) { + if (!shardsState.hasData()) { + return null; + } + List<NodeGatewayStartedShard> nodeShardStates = new ArrayList<>(); + shardsState.getData().forEach((node, nodeGatewayStartedShard) -> { + nodeShardStates.add( + new NodeGatewayStartedShard( + nodeGatewayStartedShard.getGatewayShardStarted().allocationId(), + nodeGatewayStartedShard.getGatewayShardStarted().primary(), + nodeGatewayStartedShard.getGatewayShardStarted().replicationCheckpoint(), + nodeGatewayStartedShard.getGatewayShardStarted().storeException(), + node + ) + ); + }); + return nodeShardStates; + } + + protected AllocateUnassignedDecision getAllocationDecision( + ShardRouting unassignedShard, + RoutingAllocation allocation, + List<NodeGatewayStartedShard> shardState, + Logger logger + ) { + final boolean explain = allocation.debugDecision(); + if (shardState == null) { allocation.setHasPendingAsyncFetch(); List<NodeAllocationResult> nodeDecisions = null; if (explain) { @@ -120,7 +168,6 @@ public AllocateUnassignedDecision makeAllocationDecision( } return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } - // don't create a new IndexSetting object for every shard as this could cause a lot of garbage // on cluster restart if we allocate a boat load of shards final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(unassignedShard.index()); @@ -200,7 +247,7 @@ public AllocateUnassignedDecision makeAllocationDecision( nodesToAllocate = buildNodesToAllocate(allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true); if (nodesToAllocate.yesNodeShards.isEmpty() == false) { final DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0); - final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState; + final NodeGatewayStartedShard nodeShardState = decidedNode.nodeShardState; logger.debug( "[{}][{}]: allocating [{}] to [{}] on forced primary allocation", unassignedShard.index(), @@ -260,11 +307,11 @@ public AllocateUnassignedDecision makeAllocationDecision( */ private static List<NodeAllocationResult> buildNodeDecisions( NodesToAllocate nodesToAllocate, - FetchResult<NodeGatewayStartedShards> fetchedShardData, + List<NodeGatewayStartedShard> fetchedShardData, Set<String> inSyncAllocationIds ) { List<NodeAllocationResult> nodeResults = new ArrayList<>(); - Collection<NodeGatewayStartedShards> ineligibleShards; + Collection<NodeGatewayStartedShard> ineligibleShards = new ArrayList<>(); if (nodesToAllocate != null) { final Set<DiscoveryNode> discoNodes = new HashSet<>(); nodeResults.addAll( @@ -280,15 +327,13 @@ private static List<NodeAllocationResult> buildNodeDecisions( }) .collect(Collectors.toList()) ); - ineligibleShards = fetchedShardData.getData() - .values() - .stream() + ineligibleShards = fetchedShardData.stream() .filter(shardData -> discoNodes.contains(shardData.getNode()) == false) .collect(Collectors.toList()); } else { // there were no shard copies that were eligible for being assigned the allocation, // so all fetched shard data are ineligible shards - ineligibleShards = fetchedShardData.getData().values(); + ineligibleShards = fetchedShardData; } nodeResults.addAll( @@ -300,21 +345,21 @@ private static List<NodeAllocationResult> buildNodeDecisions( return nodeResults; } - private static ShardStoreInfo shardStoreInfo(NodeGatewayStartedShards nodeShardState, Set<String> inSyncAllocationIds) { + 
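The allocation preference documented here composes naturally as a comparator chain. A simplified stand-alone sketch, where ShardState is a hypothetical stand-in and the replication checkpoint is reduced to a long for illustration:

import java.util.Comparator;

class ComparatorChainSketch {
    static final class ShardState {
        final boolean inSync;        // allocation id is in the in-sync set
        final boolean storeReadable; // no store exception
        final boolean wasPrimary;    // previous primary copy
        final long checkpoint;       // replication checkpoint stand-in

        ShardState(boolean inSync, boolean storeReadable, boolean wasPrimary, long checkpoint) {
            this.inSync = inSync;
            this.storeReadable = storeReadable;
            this.wasPrimary = wasPrimary;
            this.checkpoint = checkpoint;
        }
    }

    // matchAnyShard == true: in-sync copies first, then readable stores,
    // then previous primaries, then the highest checkpoint.
    static final Comparator<ShardState> PREFERENCE = Comparator.comparing((ShardState s) -> s.inSync)
        .reversed()
        .thenComparing(Comparator.comparing((ShardState s) -> s.storeReadable).reversed())
        .thenComparing(Comparator.comparing((ShardState s) -> s.wasPrimary).reversed())
        .thenComparing(Comparator.comparingLong((ShardState s) -> s.checkpoint).reversed());
}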
private static ShardStoreInfo shardStoreInfo(NodeGatewayStartedShard nodeShardState, Set<String> inSyncAllocationIds) { final Exception storeErr = nodeShardState.storeException(); final boolean inSync = nodeShardState.allocationId() != null && inSyncAllocationIds.contains(nodeShardState.allocationId()); return new ShardStoreInfo(nodeShardState.allocationId(), inSync, storeErr); } - private static final Comparator<NodeGatewayStartedShards> NO_STORE_EXCEPTION_FIRST_COMPARATOR = Comparator.comparing( - (NodeGatewayStartedShards state) -> state.storeException() == null + private static final Comparator<NodeGatewayStartedShard> NO_STORE_EXCEPTION_FIRST_COMPARATOR = Comparator.comparing( + (NodeGatewayStartedShard state) -> state.storeException() == null ).reversed(); - private static final Comparator<NodeGatewayStartedShards> PRIMARY_FIRST_COMPARATOR = Comparator.comparing( - NodeGatewayStartedShards::primary + private static final Comparator<NodeGatewayStartedShard> PRIMARY_FIRST_COMPARATOR = Comparator.comparing( + NodeGatewayStartedShard::primary ).reversed(); - private static final Comparator<NodeGatewayStartedShards> HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR = Comparator.comparing( - NodeGatewayStartedShards::replicationCheckpoint, + private static final Comparator<NodeGatewayStartedShard> HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR = Comparator.comparing( + NodeGatewayStartedShard::replicationCheckpoint, Comparator.nullsLast(Comparator.naturalOrder()) ); @@ -328,12 +373,12 @@ protected static NodeShardsResult buildNodeShardsResult( boolean matchAnyShard, Set<String> ignoreNodes, Set<String> inSyncAllocationIds, - FetchResult<NodeGatewayStartedShards> shardState, + List<NodeGatewayStartedShard> shardState, Logger logger ) { - List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>(); + List<NodeGatewayStartedShard> nodeShardStates = new ArrayList<>(); int numberOfAllocationsFound = 0; - for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + for (NodeGatewayStartedShard nodeShardState : shardState) { DiscoveryNode node = nodeShardState.getNode(); String allocationId = nodeShardState.allocationId(); @@ -386,17 +431,30 @@ protected static NodeShardsResult buildNodeShardsResult( } } - /* - Orders the active shards copies based on below comparators - 1. No store exception i.e. shard copy is readable - 2. Prefer previous primary shard - 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. + nodeShardStates.sort(createActiveShardComparator(matchAnyShard, inSyncAllocationIds)); + + if (logger.isTraceEnabled()) { + logger.trace( + "{} candidates for allocation: {}", + shard, + nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")) + ); + } + return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound); + } + + private static Comparator<NodeGatewayStartedShard> createActiveShardComparator(boolean matchAnyShard, Set<String> inSyncAllocationIds) { + /** + * Orders the active shards copies based on below comparators + * 1. No store exception i.e. shard copy is readable + * 2. Prefer previous primary shard + * 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. 
*/ - final Comparator<NodeGatewayStartedShards> comparator; // allocation preference + final Comparator<NodeGatewayStartedShard> comparator; // allocation preference if (matchAnyShard) { // prefer shards with matching allocation ids - Comparator<NodeGatewayStartedShards> matchingAllocationsFirst = Comparator.comparing( - (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId()) + Comparator<NodeGatewayStartedShard> matchingAllocationsFirst = Comparator.comparing( + (NodeGatewayStartedShard state) -> inSyncAllocationIds.contains(state.allocationId()) ).reversed(); comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR) .thenComparing(PRIMARY_FIRST_COMPARATOR) @@ -406,16 +464,7 @@ protected static NodeShardsResult buildNodeShardsResult( .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } - nodeShardStates.sort(comparator); - - if (logger.isTraceEnabled()) { - logger.trace( - "{} candidates for allocation: {}", - shard, - nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")) - ); - } - return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound); + return comparator; } /** @@ -423,14 +472,14 @@ protected static NodeShardsResult buildNodeShardsResult( */ private static NodesToAllocate buildNodesToAllocate( RoutingAllocation allocation, - List<NodeGatewayStartedShards> nodeShardStates, + List<NodeGatewayStartedShard> nodeShardStates, ShardRouting shardRouting, boolean forceAllocate ) { List<DecidedNode> yesNodeShards = new ArrayList<>(); List<DecidedNode> throttledNodeShards = new ArrayList<>(); List<DecidedNode> noNodeShards = new ArrayList<>(); - for (NodeGatewayStartedShards nodeShardState : nodeShardStates) { + for (NodeGatewayStartedShard nodeShardState : nodeShardStates) { RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId()); if (node == null) { continue; @@ -457,17 +506,23 @@ private static NodesToAllocate buildNodesToAllocate( protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation); - private static class NodeShardsResult { - final List<NodeGatewayStartedShards> orderedAllocationCandidates; + /** + * This class encapsulates the result of a call to {@link #buildNodeShardsResult} + */ + static class NodeShardsResult { + final List<NodeGatewayStartedShard> orderedAllocationCandidates; final int allocationsFound; - NodeShardsResult(List<NodeGatewayStartedShards> orderedAllocationCandidates, int allocationsFound) { + NodeShardsResult(List<NodeGatewayStartedShard> orderedAllocationCandidates, int allocationsFound) { this.orderedAllocationCandidates = orderedAllocationCandidates; this.allocationsFound = allocationsFound; } } - static class NodesToAllocate { + /** + * This class encapsulates the result of a call to {@link #buildNodesToAllocate} + */ + protected static class NodesToAllocate { final List<DecidedNode> yesNodeShards; final List<DecidedNode> throttleNodeShards; final List<DecidedNode> noNodeShards; @@ -484,10 +539,10 @@ static class NodesToAllocate { * by the allocator for allocating to the node that holds the shard copy. 
 */
     private static class DecidedNode {
-        final NodeGatewayStartedShards nodeShardState;
+        final NodeGatewayStartedShard nodeShardState;
         final Decision decision;
 
-        private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) {
+        private DecidedNode(NodeGatewayStartedShard nodeShardState, Decision decision) {
             this.nodeShardState = nodeShardState;
             this.decision = decision;
         }
diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java
new file mode 100644
index 0000000000000..8d222903b6f29
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java
@@ -0,0 +1,150 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway;
+
+import org.apache.logging.log4j.Logger;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.routing.RoutingNodes;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
+import org.opensearch.cluster.routing.allocation.RoutingAllocation;
+import org.opensearch.gateway.AsyncShardFetch.FetchResult;
+import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.NodeGatewayStartedShard;
+import org.opensearch.gateway.TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * PrimaryShardBatchAllocator is similar to {@link org.opensearch.gateway.PrimaryShardAllocator}; the only difference is
+ * that it can allocate multiple unassigned primary shards, whereas PrimaryShardAllocator can only allocate a single
+ * unassigned shard.
+ * The primary shard batch allocator allocates multiple unassigned primary shards to nodes that hold
+ * valid copies of the unassigned primaries. It does this by iterating over all unassigned
+ * primary shards in the routing table and fetching shard metadata from each node in the cluster
+ * that holds a copy of the shard. The shard metadata from each node is compared against the
+ * set of valid allocation IDs, and for all valid shard copies (if any), the primary shard batch allocator
+ * executes the allocation deciders to choose a copy to assign the primary shard to.
+ * <p>
+ * Note that the PrimaryShardBatchAllocator does *not* allocate primaries on index creation
+ * (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}),
+ * nor does it allocate primaries when a primary shard failed and there is a valid replica
+ * copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
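+ * <p>
+ * For illustration only (the caller shown here is hypothetical, not part of this change), the batch
+ * entry point returns one decision per shard, which a consumer could apply as:
+ * <pre>
+ * var decisions = batchAllocator.makeAllocationDecision(unassignedPrimaries, allocation, logger);
+ * decisions.forEach((shard, decision) -> {
+ *     if (decision.getAllocationDecision() == AllocationDecision.YES) {
+ *         // initialize the shard on decision.getTargetNode()
+ *     }
+ * });
+ * </pre>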
+ *
+ * @opensearch.internal
+ */
+public abstract class PrimaryShardBatchAllocator extends PrimaryShardAllocator {
+
+    protected abstract FetchResult<NodeGatewayStartedShardsBatch> fetchData(
+        List<ShardRouting> eligibleShards,
+        List<ShardRouting> inEligibleShards,
+        RoutingAllocation allocation
+    );
+
+    protected FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(
+        ShardRouting shard,
+        RoutingAllocation allocation
+    ) {
+        logger.error("fetchData for single shard called via batch allocator, shard id {}", shard.shardId());
+        throw new IllegalStateException("PrimaryShardBatchAllocator should only be used for a batch of shards");
+    }
+
+    @Override
+    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger) {
+        return makeAllocationDecision(Collections.singletonList(unassignedShard), allocation, logger).get(unassignedShard);
+    }
+
+    /**
+     * Build allocation decisions for all the shards in the given batch.
+     *
+     * @param shards     set of shards given for allocation
+     * @param allocation current allocation of all the shards
+     * @param logger     logger used for logging
+     * @return shard to allocation decision map
+     */
+    @Override
+    public HashMap<ShardRouting, AllocateUnassignedDecision> makeAllocationDecision(
+        List<ShardRouting> shards,
+        RoutingAllocation allocation,
+        Logger logger
+    ) {
+        HashMap<ShardRouting, AllocateUnassignedDecision> shardAllocationDecisions = new HashMap<>();
+        List<ShardRouting> eligibleShards = new ArrayList<>();
+        List<ShardRouting> inEligibleShards = new ArrayList<>();
+        // identify ineligible shards
+        for (ShardRouting shard : shards) {
+            AllocateUnassignedDecision decision = getInEligibleShardDecision(shard, allocation);
+            if (decision != null) {
+                inEligibleShards.add(shard);
+                shardAllocationDecisions.put(shard, decision);
+            } else {
+                eligibleShards.add(shard);
+            }
+        }
+        // Do not call fetchData if there are no eligible shards
+        if (eligibleShards.isEmpty()) {
+            return shardAllocationDecisions;
+        }
+        // only fetch data for eligible shards
+        final FetchResult<NodeGatewayStartedShardsBatch> shardsState = fetchData(eligibleShards, inEligibleShards, allocation);
+
+        // process the received data
+        for (ShardRouting unassignedShard : eligibleShards) {
+            List<NodeGatewayStartedShard> nodeShardStates = adaptToNodeShardStates(unassignedShard, shardsState);
+            // get allocation decision for this shard
+            shardAllocationDecisions.put(unassignedShard, getAllocationDecision(unassignedShard, allocation, nodeShardStates, logger));
+        }
+        return shardAllocationDecisions;
+    }
+
+    /**
+     * Transforms {@link FetchResult} of {@link NodeGatewayStartedShardsBatch} to {@link List} of {@link NodeGatewayStartedShard}.
+     * <p>
+     * Returns null if {@link FetchResult} does not have any data.
+     * <p>
+     * shardsState contains the data keyed by DiscoveryNode, where each value is a NodeGatewayStartedShardsBatch
+     * holding a map of ShardId to shard metadata. To get the data for one shard (from all the nodes), we traverse
+     * the map and construct the nodeShardState along the way before making any allocation decision, since metadata
+     * for a particular shard is needed from all the discovery nodes.
+ * + * @param unassignedShard unassigned shard + * @param shardsState fetch data result for the whole batch + * @return shard state returned from each node + */ + private static List<NodeGatewayStartedShard> adaptToNodeShardStates( + ShardRouting unassignedShard, + FetchResult<NodeGatewayStartedShardsBatch> shardsState + ) { + if (!shardsState.hasData()) { + return null; + } + List<NodeGatewayStartedShard> nodeShardStates = new ArrayList<>(); + Map<DiscoveryNode, NodeGatewayStartedShardsBatch> nodeResponses = shardsState.getData(); + + // build data for a shard from all the nodes + nodeResponses.forEach((node, nodeGatewayStartedShardsBatch) -> { + TransportNodesGatewayStartedShardHelper.GatewayStartedShard shardData = nodeGatewayStartedShardsBatch + .getNodeGatewayStartedShardsBatch() + .get(unassignedShard.shardId()); + nodeShardStates.add( + new NodeGatewayStartedShard( + shardData.allocationId(), + shardData.primary(), + shardData.replicationCheckpoint(), + shardData.storeException(), + node + ) + ); + }); + return nodeShardStates; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index f530052c5bcd1..d9474b32bdbf6 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -51,8 +51,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import java.util.ArrayList; import java.util.Collections; @@ -61,6 +61,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; @@ -70,93 +71,112 @@ * @opensearch.internal */ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { + protected boolean shouldSkipFetchForRecovery(ShardRouting shard) { + if (shard.primary()) { + return true; + } + if (shard.initializing() == false) { + return true; + } + if (shard.relocatingNodeId() != null) { + return true; + } + if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
+ return true; + } + return false; + } + + protected Runnable cancelExistingRecoveryForBetterMatch( + ShardRouting shard, + RoutingAllocation allocation, + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores + ) { + if (nodeShardStores == null) { + logger.trace("{}: fetching new stores for initializing shard", shard); + return null; + } + Metadata metadata = allocation.metadata(); + RoutingNodes routingNodes = allocation.routingNodes(); + ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); + assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; + assert primaryShard.currentNodeId() != null; + final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); + + final StoreFilesMetadata primaryStore = findStore(primaryNode, nodeShardStores); + if (primaryStore == null) { + // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) + // just let the recovery find it out, no need to do anything about it for the initializing shard + logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard); + return null; + } + + MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, true, primaryNode, primaryStore, nodeShardStores, false); + if (matchingNodes.getNodeWithHighestMatch() != null) { + DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId()); + DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch(); + // current node will not be in matchingNodes as it is filtered away by SameShardAllocationDecider + if (currentNode.equals(nodeWithHighestMatch) == false + && matchingNodes.canPerformNoopRecovery(nodeWithHighestMatch) + && canPerformOperationBasedRecovery(primaryStore, nodeShardStores, currentNode) == false) { + // we found a better match that can perform noop recovery, cancel the existing allocation. + logger.debug( + "cancelling allocation of replica on [{}], can perform a noop recovery on node [{}]", + currentNode, + nodeWithHighestMatch + ); + final Set<String> failedNodeIds = shard.unassignedInfo() == null + ? Collections.emptySet() + : shard.unassignedInfo().getFailedNodeIds(); + UnassignedInfo unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.REALLOCATED_REPLICA, + "existing allocation of replica to [" + + currentNode + + "] cancelled, can perform a noop recovery on [" + + nodeWithHighestMatch + + "]", + null, + 0, + allocation.getCurrentNanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodeIds + ); + // don't cancel shard in the loop as it will cause a ConcurrentModificationException + return () -> routingNodes.failShard( + logger, + shard, + unassignedInfo, + metadata.getIndexSafe(shard.index()), + allocation.changes() + ); + } + } + return null; + } + /** * Process existing recoveries of replicas and see if we need to cancel them if we find a better * match. Today, a better match is one that can perform a no-op recovery while the previous recovery * has to copy segment files. 
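+     * <p>
+     * Cancellation is deferred: cancelExistingRecoveryForBetterMatch above returns a {@link Runnable}
+     * rather than failing the shard in place, because mutating the routing nodes while iterating them
+     * would throw a ConcurrentModificationException. A minimal sketch of the pattern (names are
+     * illustrative only):
+     * <pre>
+     * List&lt;Runnable&gt; actions = new ArrayList&lt;&gt;();
+     * for (ShardRouting shard : shards) {
+     *     Runnable action = maybeCancel(shard);  // decide while iterating
+     *     if (action != null) {
+     *         actions.add(action);               // defer the mutation
+     *     }
+     * }
+     * actions.forEach(Runnable::run);            // mutate only after iteration completes
+     * </pre>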
*/ public void processExistingRecoveries(RoutingAllocation allocation) { - Metadata metadata = allocation.metadata(); RoutingNodes routingNodes = allocation.routingNodes(); List<Runnable> shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { for (ShardRouting shard : routingNode) { - if (shard.primary()) { - continue; - } - if (shard.initializing() == false) { - continue; - } - if (shard.relocatingNodeId() != null) { - continue; - } - - // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { + if (shouldSkipFetchForRecovery(shard)) { continue; } AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores = fetchData(shard, allocation); - if (shardStores.hasData() == false) { - logger.trace("{}: fetching new stores for initializing shard", shard); - continue; // still fetching - } + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shardStores); - ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); - assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; - assert primaryShard.currentNodeId() != null; - final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); - final TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore = findStore(primaryNode, shardStores); - if (primaryStore == null) { - // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) - // just let the recovery find it out, no need to do anything about it for the initializing shard - logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard); - continue; - } - - MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, true, primaryNode, primaryStore, shardStores, false); - if (matchingNodes.getNodeWithHighestMatch() != null) { - DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId()); - DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch(); - // current node will not be in matchingNodes as it is filtered away by SameShardAllocationDecider - if (currentNode.equals(nodeWithHighestMatch) == false - && matchingNodes.canPerformNoopRecovery(nodeWithHighestMatch) - && canPerformOperationBasedRecovery(primaryStore, shardStores, currentNode) == false) { - // we found a better match that can perform noop recovery, cancel the existing allocation. - logger.debug( - "cancelling allocation of replica on [{}], can perform a noop recovery on node [{}]", - currentNode, - nodeWithHighestMatch - ); - final Set<String> failedNodeIds = shard.unassignedInfo() == null - ? 
Collections.emptySet() - : shard.unassignedInfo().getFailedNodeIds(); - UnassignedInfo unassignedInfo = new UnassignedInfo( - UnassignedInfo.Reason.REALLOCATED_REPLICA, - "existing allocation of replica to [" - + currentNode - + "] cancelled, can perform a noop recovery on [" - + nodeWithHighestMatch - + "]", - null, - 0, - allocation.getCurrentNanoTime(), - System.currentTimeMillis(), - false, - UnassignedInfo.AllocationStatus.NO_ATTEMPT, - failedNodeIds - ); - // don't cancel shard in the loop as it will cause a ConcurrentModificationException - shardCancellationActions.add( - () -> routingNodes.failShard( - logger, - shard, - unassignedInfo, - metadata.getIndexSafe(shard.index()), - allocation.changes() - ) - ); - } + Runnable cancellationAction = cancelExistingRecoveryForBetterMatch(shard, allocation, nodeShardStores); + if (cancellationAction != null) { + shardCancellationActions.add(cancellationAction); } } } @@ -168,7 +188,7 @@ && canPerformOperationBasedRecovery(primaryStore, shardStores, currentNode) == f /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - private static boolean isResponsibleFor(final ShardRouting shard) { + protected static boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... @@ -186,12 +206,11 @@ public AllocateUnassignedDecision makeAllocationDecision( return AllocateUnassignedDecision.NOT_TAKEN; } - final RoutingNodes routingNodes = allocation.routingNodes(); - final boolean explain = allocation.debugDecision(); // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing Tuple<Decision, Map<String, NodeAllocationResult>> result = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation); Decision allocateDecision = result.v1(); - if (allocateDecision.type() != Decision.Type.YES && (explain == false || hasInitiatedFetching(unassignedShard) == false)) { + if (allocateDecision.type() != Decision.Type.YES + && (allocation.debugDecision() == false || hasInitiatedFetching(unassignedShard) == false)) { // only return early if we are not in explain mode, or we are in explain mode but we have not // yet attempted to fetch any shard data logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard); @@ -202,28 +221,41 @@ public AllocateUnassignedDecision makeAllocationDecision( } AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores = fetchData(unassignedShard, allocation); - if (shardStores.hasData() == false) { + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shardStores); + return getAllocationDecision(unassignedShard, allocation, nodeShardStores, result, logger); + } + + protected AllocateUnassignedDecision getAllocationDecision( + ShardRouting unassignedShard, + RoutingAllocation allocation, + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores, + Tuple<Decision, Map<String, NodeAllocationResult>> allocationDecision, + Logger logger + ) { + if (nodeShardStores == null) { + // node shard stores is null when we don't have data yet and still fetching the shard stores logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard); allocation.setHasPendingAsyncFetch(); List<NodeAllocationResult> nodeDecisions = null; - if (explain) { + if (allocation.debugDecision()) 
{ nodeDecisions = buildDecisionsForAllNodes(unassignedShard, allocation); } return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } - + final RoutingNodes routingNodes = allocation.routingNodes(); + final boolean explain = allocation.debugDecision(); ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId()); if (primaryShard == null) { assert explain : "primary should only be null here if we are in explain mode, so we didn't " + "exit early when canBeAllocatedToAtLeastOneNode didn't return a YES decision"; return AllocateUnassignedDecision.no( - UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.type()), - new ArrayList<>(result.v2().values()) + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), + new ArrayList<>(allocationDecision.v2().values()) ); } assert primaryShard.currentNodeId() != null; final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); - final TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore = findStore(primaryNode, shardStores); + final StoreFilesMetadata primaryStore = findStore(primaryNode, nodeShardStores); if (primaryStore == null) { // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica @@ -239,14 +271,17 @@ public AllocateUnassignedDecision makeAllocationDecision( false, primaryNode, primaryStore, - shardStores, + nodeShardStores, explain ); assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions"; - List<NodeAllocationResult> nodeDecisions = augmentExplanationsWithStoreInfo(result.v2(), matchingNodes.nodeDecisions); - if (allocateDecision.type() != Decision.Type.YES) { - return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.type()), nodeDecisions); + List<NodeAllocationResult> nodeDecisions = augmentExplanationsWithStoreInfo(allocationDecision.v2(), matchingNodes.nodeDecisions); + if (allocationDecision.v1().type() != Decision.Type.YES) { + return AllocateUnassignedDecision.no( + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), + nodeDecisions + ); } else if (matchingNodes.getNodeWithHighestMatch() != null) { RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId()); // we only check on THROTTLE since we checked before on NO @@ -301,7 +336,7 @@ public AllocateUnassignedDecision makeAllocationDecision( * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element * in the returned tuple. */ - private static Tuple<Decision, Map<String, NodeAllocationResult>> canBeAllocatedToAtLeastOneNode( + protected static Tuple<Decision, Map<String, NodeAllocationResult>> canBeAllocatedToAtLeastOneNode( ShardRouting shard, RoutingAllocation allocation ) { @@ -357,15 +392,11 @@ private static List<NodeAllocationResult> augmentExplanationsWithStoreInfo( /** * Finds the store for the assigned shard in the fetched data, returns null if none is found. 
*/ - private static TransportNodesListShardStoreMetadata.StoreFilesMetadata findStore( - DiscoveryNode node, - AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data - ) { - NodeStoreFilesMetadata nodeFilesStore = data.getData().get(node); - if (nodeFilesStore == null) { + private static StoreFilesMetadata findStore(DiscoveryNode node, Map<DiscoveryNode, StoreFilesMetadata> data) { + if (!data.containsKey(node)) { return null; } - return nodeFilesStore.storeFilesMetadata(); + return data.get(node); } private MatchingNodes findMatchingNodes( @@ -373,20 +404,20 @@ private MatchingNodes findMatchingNodes( RoutingAllocation allocation, boolean noMatchFailedNodes, DiscoveryNode primaryNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data, + StoreFilesMetadata primaryStore, + Map<DiscoveryNode, StoreFilesMetadata> data, boolean explain ) { Map<DiscoveryNode, MatchingNode> matchingNodes = new HashMap<>(); Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null; - for (Map.Entry<DiscoveryNode, NodeStoreFilesMetadata> nodeStoreEntry : data.getData().entrySet()) { + for (Map.Entry<DiscoveryNode, StoreFilesMetadata> nodeStoreEntry : data.entrySet()) { DiscoveryNode discoNode = nodeStoreEntry.getKey(); if (noMatchFailedNodes && shard.unassignedInfo() != null && shard.unassignedInfo().getFailedNodeIds().contains(discoNode.getId())) { continue; } - TransportNodesListShardStoreMetadata.StoreFilesMetadata storeFilesMetadata = nodeStoreEntry.getValue().storeFilesMetadata(); + StoreFilesMetadata storeFilesMetadata = nodeStoreEntry.getValue(); // we don't have any files at all, it is an empty index if (storeFilesMetadata.isEmpty()) { continue; @@ -441,10 +472,20 @@ private MatchingNodes findMatchingNodes( return new MatchingNodes(matchingNodes, nodeDecisions); } - private static long computeMatchingBytes( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - TransportNodesListShardStoreMetadata.StoreFilesMetadata storeFilesMetadata + private Map<DiscoveryNode, StoreFilesMetadata> convertToNodeStoreFilesMetadataMap( + AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data ) { + if (data.hasData() == false) { + // if we don't have data yet return null + return null; + } + return data.getData() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().storeFilesMetadata())); + } + + private static long computeMatchingBytes(StoreFilesMetadata primaryStore, StoreFilesMetadata storeFilesMetadata) { long sizeMatched = 0; for (StoreFileMetadata storeFileMetadata : storeFilesMetadata) { String metadataFileName = storeFileMetadata.name(); @@ -455,19 +496,16 @@ private static long computeMatchingBytes( return sizeMatched; } - private static boolean hasMatchingSyncId( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - TransportNodesListShardStoreMetadata.StoreFilesMetadata replicaStore - ) { + private static boolean hasMatchingSyncId(StoreFilesMetadata primaryStore, StoreFilesMetadata replicaStore) { String primarySyncId = primaryStore.syncId(); return primarySyncId != null && primarySyncId.equals(replicaStore.syncId()); } private static MatchingNode computeMatchingNode( DiscoveryNode primaryNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, + StoreFilesMetadata primaryStore, DiscoveryNode replicaNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata replicaStore + StoreFilesMetadata 
replicaStore
     ) {
         final long retainingSeqNoForPrimary = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(primaryNode);
         final long retainingSeqNoForReplica = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(replicaNode);
@@ -478,15 +516,15 @@ private static MatchingNode computeMatchingNode(
     }
 
     private static boolean canPerformOperationBasedRecovery(
-        TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore,
-        AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores,
+        StoreFilesMetadata primaryStore,
+        Map<DiscoveryNode, StoreFilesMetadata> shardStores,
         DiscoveryNode targetNode
     ) {
-        final NodeStoreFilesMetadata targetNodeStore = shardStores.getData().get(targetNode);
-        if (targetNodeStore == null || targetNodeStore.storeFilesMetadata().isEmpty()) {
+        final StoreFilesMetadata targetNodeStore = shardStores.get(targetNode);
+        if (targetNodeStore == null || targetNodeStore.isEmpty()) {
            return false;
        }
-        if (hasMatchingSyncId(primaryStore, targetNodeStore.storeFilesMetadata())) {
+        if (hasMatchingSyncId(primaryStore, targetNodeStore)) {
            return true;
        }
        return primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(targetNode) >= 0;
@@ -499,7 +537,10 @@ private static boolean canPerformOperationBasedRecovery(
      */
     protected abstract boolean hasInitiatedFetching(ShardRouting shard);
 
-    private static class MatchingNode {
+    /**
+     * A class to encapsulate the details of a matching node for shard assignment
+     */
+    protected static class MatchingNode {
         static final Comparator<MatchingNode> COMPARATOR = Comparator.<MatchingNode, Boolean>comparing(m -> m.isNoopRecovery)
             .thenComparing(m -> m.retainingSeqNo)
             .thenComparing(m -> m.matchingBytes);
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java
new file mode 100644
index 0000000000000..27cce76b1b694
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java
@@ -0,0 +1,262 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.OpenSearchException;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.env.NodeEnvironment;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.ShardPath;
+import org.opensearch.index.shard.ShardStateMetadata;
+import org.opensearch.index.store.Store;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * This class has the common code used in {@link TransportNodesListGatewayStartedShards} and
+ * {@link TransportNodesListGatewayStartedShardsBatch} to get the shard info on the local node.
+ * <p>
+ * This class should not be used to add more functions, and it will be removed when
+ * {@link TransportNodesListGatewayStartedShards} is deprecated and all the code is moved to
+ * {@link TransportNodesListGatewayStartedShardsBatch}.
+ *
+ * @opensearch.internal
+ */
+public class TransportNodesGatewayStartedShardHelper {
+    public static GatewayStartedShard getShardInfoOnLocalNode(
+        Logger logger,
+        final ShardId shardId,
+        NamedXContentRegistry namedXContentRegistry,
+        NodeEnvironment nodeEnv,
+        IndicesService indicesService,
+        String shardDataPathInRequest,
+        Settings settings,
+        ClusterService clusterService
+    ) throws IOException {
+        logger.trace("{} loading local shard state info", shardId);
+        ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState(
+            logger,
+            namedXContentRegistry,
+            nodeEnv.availableShardPaths(shardId)
+        );
+        if (shardStateMetadata != null) {
+            if (indicesService.getShardOrNull(shardId) == null
+                && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) {
+                final String customDataPath;
+                if (shardDataPathInRequest != null) {
+                    customDataPath = shardDataPathInRequest;
+                } else {
+                    // TODO: Fallback for BWC with older OpenSearch versions.
+                    // Remove once request.getCustomDataPath() always returns non-null
+                    final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex());
+                    if (metadata != null) {
+                        customDataPath = new IndexSettings(metadata, settings).customDataPath();
+                    } else {
+                        logger.trace("{} node doesn't have metadata for the requested index", shardId);
+                        throw new OpenSearchException("node doesn't have metadata for index " + shardId.getIndex());
+                    }
+                }
+                // we don't have an open shard on the store, validate the files on disk are openable
+                ShardPath shardPath = null;
+                try {
+                    shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath);
+                    if (shardPath == null) {
+                        throw new IllegalStateException(shardId + " no shard path found");
+                    }
+                    Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
+                } catch (Exception exception) {
+                    final ShardPath finalShardPath = shardPath;
+                    logger.trace(
+                        () -> new ParameterizedMessage(
+                            "{} can't open index for shard [{}] in path [{}]",
+                            shardId,
+                            shardStateMetadata,
+                            (finalShardPath != null) ? finalShardPath.resolveIndex() : ""
+                        ),
+                        exception
+                    );
+                    String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null;
+                    return new GatewayStartedShard(allocationId, shardStateMetadata.primary, null, exception);
+                }
+            }
+
+            logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata);
+            String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null;
+            final IndexShard shard = indicesService.getShardOrNull(shardId);
+            return new GatewayStartedShard(
+                allocationId,
+                shardStateMetadata.primary,
+                shard != null ? shard.getLatestReplicationCheckpoint() : null
+            );
+        }
+        logger.trace("{} no local shard info found", shardId);
+        return new GatewayStartedShard(null, false, null);
+    }
+
+    /**
+     * This class encapsulates the metadata about a started shard that needs to be persisted or sent between nodes.
+     * This is used in {@link TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch} to construct the response for each node, instead of
+     * {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards}, because we don't need to save an extra
+     * {@link DiscoveryNode} object like in {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards},
+     * which reduces the memory footprint of its objects.
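+     * <p>
+     * Optional fields are written with a boolean presence prefix. A serialization round-trip,
+     * shown for illustration only (the test-style use of BytesStreamOutput is an assumption,
+     * not part of this change):
+     * <pre>
+     * BytesStreamOutput out = new BytesStreamOutput();
+     * original.writeTo(out);
+     * GatewayStartedShard copy = new GatewayStartedShard(out.bytes().streamInput());
+     * assert copy.equals(original);
+     * </pre>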
+ * + * @opensearch.internal + */ + public static class GatewayStartedShard { + private final String allocationId; + private final boolean primary; + private final Exception storeException; + private final ReplicationCheckpoint replicationCheckpoint; + + public GatewayStartedShard(StreamInput in) throws IOException { + allocationId = in.readOptionalString(); + primary = in.readBoolean(); + if (in.readBoolean()) { + storeException = in.readException(); + } else { + storeException = null; + } + if (in.readBoolean()) { + replicationCheckpoint = new ReplicationCheckpoint(in); + } else { + replicationCheckpoint = null; + } + } + + public GatewayStartedShard(String allocationId, boolean primary, ReplicationCheckpoint replicationCheckpoint) { + this(allocationId, primary, replicationCheckpoint, null); + } + + public GatewayStartedShard( + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + Exception storeException + ) { + this.allocationId = allocationId; + this.primary = primary; + this.replicationCheckpoint = replicationCheckpoint; + this.storeException = storeException; + } + + public String allocationId() { + return this.allocationId; + } + + public boolean primary() { + return this.primary; + } + + public ReplicationCheckpoint replicationCheckpoint() { + return this.replicationCheckpoint; + } + + public Exception storeException() { + return this.storeException; + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(allocationId); + out.writeBoolean(primary); + if (storeException != null) { + out.writeBoolean(true); + out.writeException(storeException); + } else { + out.writeBoolean(false); + } + if (replicationCheckpoint != null) { + out.writeBoolean(true); + replicationCheckpoint.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GatewayStartedShard that = (GatewayStartedShard) o; + + return primary == that.primary + && Objects.equals(allocationId, that.allocationId) + && Objects.equals(storeException, that.storeException) + && Objects.equals(replicationCheckpoint, that.replicationCheckpoint); + } + + @Override + public int hashCode() { + int result = (allocationId != null ? allocationId.hashCode() : 0); + result = 31 * result + (primary ? 1 : 0); + result = 31 * result + (storeException != null ? storeException.hashCode() : 0); + result = 31 * result + (replicationCheckpoint != null ? replicationCheckpoint.hashCode() : 0); + return result; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append("NodeGatewayStartedShards[").append("allocationId=").append(allocationId).append(",primary=").append(primary); + if (storeException != null) { + buf.append(",storeException=").append(storeException); + } + if (replicationCheckpoint != null) { + buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString()); + } + buf.append("]"); + return buf.toString(); + } + } + + /** + * This class extends the {@link GatewayStartedShard} which contains all necessary shard metadata like + * allocationId and replication checkpoint. It also has DiscoveryNode which is needed by + * {@link PrimaryShardAllocator} and {@link PrimaryShardBatchAllocator} to make allocation decision. 
+ * This class removes the dependency on
+ * {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards} for making allocation decisions in
+ * {@link PrimaryShardAllocator} and {@link PrimaryShardBatchAllocator}.
+ */
+    public static class NodeGatewayStartedShard extends GatewayStartedShard {
+
+        private final DiscoveryNode node;
+
+        public NodeGatewayStartedShard(
+            String allocationId,
+            boolean primary,
+            ReplicationCheckpoint replicationCheckpoint,
+            Exception storeException,
+            DiscoveryNode node
+        ) {
+            super(allocationId, primary, replicationCheckpoint, storeException);
+            this.node = node;
+        }
+
+        public DiscoveryNode getNode() {
+            return node;
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
index 601a5c671d67c..4b1f611bb88ab 100644
--- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.gateway;
 
-import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.action.ActionType;
@@ -43,7 +42,6 @@
 import org.opensearch.action.support.nodes.BaseNodesResponse;
 import org.opensearch.action.support.nodes.TransportNodesAction;
 import org.opensearch.cluster.ClusterName;
-import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
@@ -55,11 +53,7 @@
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.env.NodeEnvironment;
-import org.opensearch.index.IndexSettings;
-import org.opensearch.index.shard.IndexShard;
-import org.opensearch.index.shard.ShardPath;
-import org.opensearch.index.shard.ShardStateMetadata;
-import org.opensearch.index.store.Store;
+import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.indices.store.ShardAttributes;
@@ -72,6 +66,8 @@
 import java.util.Map;
 import java.util.Objects;
 
+import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode;
+
 /**
  * This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}.
* We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate @@ -159,72 +155,25 @@ protected NodesGatewayStartedShards newResponse( @Override protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { try { - final ShardId shardId = request.getShardId(); - logger.trace("{} loading local shard state info", shardId); - ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState( + GatewayStartedShard shardInfo = getShardInfoOnLocalNode( logger, + request.getShardId(), namedXContentRegistry, - nodeEnv.availableShardPaths(request.shardId) + nodeEnv, + indicesService, + request.getCustomDataPath(), + settings, + clusterService + ); + return new NodeGatewayStartedShards( + clusterService.localNode(), + new GatewayStartedShard( + shardInfo.allocationId(), + shardInfo.primary(), + shardInfo.replicationCheckpoint(), + shardInfo.storeException() + ) ); - if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null - && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { - final String customDataPath; - if (request.getCustomDataPath() != null) { - customDataPath = request.getCustomDataPath(); - } else { - // TODO: Fallback for BWC with older OpenSearch versions. - // Remove once request.getCustomDataPath() always returns non-null - final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); - if (metadata != null) { - customDataPath = new IndexSettings(metadata, settings).customDataPath(); - } else { - logger.trace("{} node doesn't have meta data for the requests index", shardId); - throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); - } - } - // we don't have an open shard on the store, validate the files on disk are openable - ShardPath shardPath = null; - try { - shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); - if (shardPath == null) { - throw new IllegalStateException(shardId + " no shard path found"); - } - Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); - } catch (Exception exception) { - final ShardPath finalShardPath = shardPath; - logger.trace( - () -> new ParameterizedMessage( - "{} can't open index for shard [{}] in path [{}]", - shardId, - shardStateMetadata, - (finalShardPath != null) ? finalShardPath.resolveIndex() : "" - ), - exception - ); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - null, - exception - ); - } - } - - logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - final IndexShard shard = indicesService.getShardOrNull(shardId); - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - shard != null ? 
shard.getLatestReplicationCheckpoint() : null - ); - } - logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), null, false, null); } catch (Exception e) { throw new OpenSearchException("failed to load started shards", e); } @@ -356,81 +305,51 @@ public String getCustomDataPath() { * @opensearch.internal */ public static class NodeGatewayStartedShards extends BaseNodeResponse { - private final String allocationId; - private final boolean primary; - private final Exception storeException; - private final ReplicationCheckpoint replicationCheckpoint; + private final GatewayStartedShard gatewayStartedShard; public NodeGatewayStartedShards(StreamInput in) throws IOException { super(in); - allocationId = in.readOptionalString(); - primary = in.readBoolean(); + String allocationId = in.readOptionalString(); + boolean primary = in.readBoolean(); + Exception storeException; if (in.readBoolean()) { storeException = in.readException(); } else { storeException = null; } + ReplicationCheckpoint replicationCheckpoint; if (in.getVersion().onOrAfter(Version.V_2_3_0) && in.readBoolean()) { replicationCheckpoint = new ReplicationCheckpoint(in); } else { replicationCheckpoint = null; } + this.gatewayStartedShard = new GatewayStartedShard(allocationId, primary, replicationCheckpoint, storeException); } - public NodeGatewayStartedShards( - DiscoveryNode node, - String allocationId, - boolean primary, - ReplicationCheckpoint replicationCheckpoint - ) { - this(node, allocationId, primary, replicationCheckpoint, null); + public GatewayStartedShard getGatewayShardStarted() { + return gatewayStartedShard; } - public NodeGatewayStartedShards( - DiscoveryNode node, - String allocationId, - boolean primary, - ReplicationCheckpoint replicationCheckpoint, - Exception storeException - ) { + public NodeGatewayStartedShards(DiscoveryNode node, GatewayStartedShard gatewayStartedShard) { super(node); - this.allocationId = allocationId; - this.primary = primary; - this.replicationCheckpoint = replicationCheckpoint; - this.storeException = storeException; - } - - public String allocationId() { - return this.allocationId; - } - - public boolean primary() { - return this.primary; - } - - public ReplicationCheckpoint replicationCheckpoint() { - return this.replicationCheckpoint; - } - - public Exception storeException() { - return this.storeException; + this.gatewayStartedShard = gatewayStartedShard; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalString(allocationId); - out.writeBoolean(primary); - if (storeException != null) { + out.writeOptionalString(gatewayStartedShard.allocationId()); + out.writeBoolean(gatewayStartedShard.primary()); + if (gatewayStartedShard.storeException() != null) { out.writeBoolean(true); - out.writeException(storeException); + out.writeException(gatewayStartedShard.storeException()); } else { out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_2_3_0)) { - if (replicationCheckpoint != null) { + if (gatewayStartedShard.replicationCheckpoint() != null) { out.writeBoolean(true); - replicationCheckpoint.writeTo(out); + gatewayStartedShard.replicationCheckpoint().writeTo(out); } else { out.writeBoolean(false); } @@ -448,33 +367,17 @@ public boolean equals(Object o) { NodeGatewayStartedShards that = (NodeGatewayStartedShards) o; - return primary == that.primary - && Objects.equals(allocationId, that.allocationId) - && Objects.equals(storeException, 
that.storeException)
-            && Objects.equals(replicationCheckpoint, that.replicationCheckpoint);
+        return gatewayStartedShard.equals(that.gatewayStartedShard);
     }
 
     @Override
     public int hashCode() {
-        int result = (allocationId != null ? allocationId.hashCode() : 0);
-        result = 31 * result + (primary ? 1 : 0);
-        result = 31 * result + (storeException != null ? storeException.hashCode() : 0);
-        result = 31 * result + (replicationCheckpoint != null ? replicationCheckpoint.hashCode() : 0);
-        return result;
+        return gatewayStartedShard.hashCode();
     }
 
     @Override
     public String toString() {
-        StringBuilder buf = new StringBuilder();
-        buf.append("NodeGatewayStartedShards[").append("allocationId=").append(allocationId).append(",primary=").append(primary);
-        if (storeException != null) {
-            buf.append(",storeException=").append(storeException);
-        }
-        if (replicationCheckpoint != null) {
-            buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString());
-        }
-        buf.append("]");
-        return buf.toString();
+        return gatewayStartedShard.toString();
     }
    }
 }
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java
new file mode 100644
index 0000000000000..dc5d85b17bc32
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java
@@ -0,0 +1,281 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway;
+
+import org.opensearch.OpenSearchException;
+import org.opensearch.action.ActionType;
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.nodes.BaseNodeResponse;
+import org.opensearch.action.support.nodes.BaseNodesRequest;
+import org.opensearch.action.support.nodes.BaseNodesResponse;
+import org.opensearch.action.support.nodes.TransportNodesAction;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.env.NodeEnvironment;
+import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.store.ShardAttributes;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode;
+
+/**
+ * This transport action is used to fetch a batch of unassigned shard versions from each node during primary allocation in {@link GatewayAllocator}.
+ * We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate
+ * shards after node or cluster restarts.
+ *
+ * @opensearch.internal
+ */
+public class TransportNodesListGatewayStartedShardsBatch extends TransportNodesAction<
+    TransportNodesListGatewayStartedShardsBatch.Request,
+    TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch,
+    TransportNodesListGatewayStartedShardsBatch.NodeRequest,
+    TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch>
+    implements
+    AsyncShardFetch.Lister<
+        TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch,
+        TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> {
+
+    public static final String ACTION_NAME = "internal:gateway/local/started_shards_batch";
+    public static final ActionType<NodesGatewayStartedShardsBatch> TYPE = new ActionType<>(
+        ACTION_NAME,
+        NodesGatewayStartedShardsBatch::new
+    );
+
+    private final Settings settings;
+    private final NodeEnvironment nodeEnv;
+    private final IndicesService indicesService;
+    private final NamedXContentRegistry namedXContentRegistry;
+
+    @Inject
+    public TransportNodesListGatewayStartedShardsBatch(
+        Settings settings,
+        ThreadPool threadPool,
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        NodeEnvironment env,
+        IndicesService indicesService,
+        NamedXContentRegistry namedXContentRegistry
+    ) {
+        super(
+            ACTION_NAME,
+            threadPool,
+            clusterService,
+            transportService,
+            actionFilters,
+            Request::new,
+            NodeRequest::new,
+            ThreadPool.Names.FETCH_SHARD_STARTED,
+            NodeGatewayStartedShardsBatch.class
+        );
+        this.settings = settings;
+        this.nodeEnv = env;
+        this.indicesService = indicesService;
+        this.namedXContentRegistry = namedXContentRegistry;
+    }
+
+    @Override
+    public void list(
+        Map<ShardId, ShardAttributes> shardAttributesMap,
+        DiscoveryNode[] nodes,
+        ActionListener<NodesGatewayStartedShardsBatch> listener
+    ) {
+        execute(new Request(nodes, shardAttributesMap), listener);
+    }
+
+    @Override
+    protected NodeRequest newNodeRequest(Request request) {
+        return new NodeRequest(request);
+    }
+
+    @Override
+    protected NodeGatewayStartedShardsBatch newNodeResponse(StreamInput in) throws IOException {
+        return new NodeGatewayStartedShardsBatch(in);
+    }
+
+    @Override
+    protected NodesGatewayStartedShardsBatch newResponse(
+        Request request,
+        List<NodeGatewayStartedShardsBatch> responses,
+        List<FailedNodeException> failures
+    ) {
+        return new NodesGatewayStartedShardsBatch(clusterService.getClusterName(), responses, failures);
+    }
+
+    /**
+     * This function is similar to the nodeOperation method of {@link TransportNodesListGatewayStartedShards}; we loop over
+     * the shards here and populate the data about the shards held by the local node.
+     *
+     * @param request Request containing the map of shard attributes keyed by shard id.
+     * @return NodeGatewayStartedShardsBatch containing the data about the primary shards held by the local node
+     */
+    @Override
+    protected NodeGatewayStartedShardsBatch nodeOperation(NodeRequest request) {
+        Map<ShardId, GatewayStartedShard> shardsOnNode = new HashMap<>();
+        for (ShardAttributes shardAttr : request.shardAttributes.values()) {
+            final ShardId shardId = shardAttr.getShardId();
+            try {
+                shardsOnNode.put(
+                    shardId,
+                    getShardInfoOnLocalNode(
+                        logger,
+                        shardId,
+                        namedXContentRegistry,
+                        nodeEnv,
+                        indicesService,
+                        shardAttr.getCustomDataPath(),
+                        settings,
+                        clusterService
+                    )
+                );
+            } catch (Exception e) {
+                shardsOnNode.put(
+                    shardId,
+                    new GatewayStartedShard(null, false, null, new OpenSearchException("failed to load started shards", e))
+                );
+            }
+        }
+        return new NodeGatewayStartedShardsBatch(clusterService.localNode(), shardsOnNode);
+    }
+
+    /**
+     * This is used in constructing the request for making the transport request to a set of other nodes.
+     * Refer to the start method of the {@link TransportNodesAction} class.
+     *
+     * @opensearch.internal
+     */
+    public static class Request extends BaseNodesRequest<Request> {
+        private final Map<ShardId, ShardAttributes> shardAttributes;
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            shardAttributes = in.readMap(ShardId::new, ShardAttributes::new);
+        }
+
+        public Request(DiscoveryNode[] nodes, Map<ShardId, ShardAttributes> shardAttributes) {
+            super(nodes);
+            this.shardAttributes = Objects.requireNonNull(shardAttributes);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+        }
+
+        public Map<ShardId, ShardAttributes> getShardAttributes() {
+            return shardAttributes;
+        }
+    }
+
+    /**
+     * Responses received from the set of other nodes are collected into this class and sent back to the caller
+     * of this transport request. Refer to {@link TransportNodesAction}.
+     *
+     * @opensearch.internal
+     */
+    public static class NodesGatewayStartedShardsBatch extends BaseNodesResponse<NodeGatewayStartedShardsBatch> {
+
+        public NodesGatewayStartedShardsBatch(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        public NodesGatewayStartedShardsBatch(
+            ClusterName clusterName,
+            List<NodeGatewayStartedShardsBatch> nodes,
+            List<FailedNodeException> failures
+        ) {
+            super(clusterName, nodes, failures);
+        }
+
+        @Override
+        protected List<NodeGatewayStartedShardsBatch> readNodesFrom(StreamInput in) throws IOException {
+            return in.readList(NodeGatewayStartedShardsBatch::new);
+        }
+
+        @Override
+        protected void writeNodesTo(StreamOutput out, List<NodeGatewayStartedShardsBatch> nodes) throws IOException {
+            out.writeList(nodes);
+        }
+    }
+
+    /**
+     * NodeRequest class is for deserializing the request received by this node from another node for this transport action.
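+     * <p>
+     * The shard attributes map is serialized with matching write and read sides, for example
+     * (shown for illustration only, mirroring the writeTo/constructor pair below):
+     * <pre>
+     * out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+     * Map&lt;ShardId, ShardAttributes&gt; read = in.readMap(ShardId::new, ShardAttributes::new);
+     * </pre>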
+ * This is used in {@link TransportNodesAction} + * + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + private final Map<ShardId, ShardAttributes> shardAttributes; + + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public NodeRequest(Request request) { + this.shardAttributes = Objects.requireNonNull(request.getShardAttributes()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + } + + /** + * This is the response from a single node. It is used in {@link NodesGatewayStartedShardsBatch} for creating + * a node-to-response mapping for this transport request. + * Refer to the {@link TransportNodesAction} start method. + * + * @opensearch.internal + */ + public static class NodeGatewayStartedShardsBatch extends BaseNodeResponse { + private final Map<ShardId, GatewayStartedShard> nodeGatewayStartedShardsBatch; + + public Map<ShardId, GatewayStartedShard> getNodeGatewayStartedShardsBatch() { + return nodeGatewayStartedShardsBatch; + } + + public NodeGatewayStartedShardsBatch(StreamInput in) throws IOException { + super(in); + this.nodeGatewayStartedShardsBatch = in.readMap(ShardId::new, GatewayStartedShard::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(nodeGatewayStartedShardsBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public NodeGatewayStartedShardsBatch(DiscoveryNode node, Map<ShardId, GatewayStartedShard> nodeGatewayStartedShardsBatch) { + super(node); + this.nodeGatewayStartedShardsBatch = nodeGatewayStartedShardsBatch; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 00e765d73f77f..d750a13dece64 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -66,6 +66,7 @@ import static org.opensearch.Version.V_2_7_0; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; +import static org.opensearch.index.codec.fuzzy.FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; @@ -269,6 +270,17 @@ public static IndexMergePolicy fromString(String text) { Property.IndexScope ); + /** + * Index setting describing the maximum number of nested scopes in queries. + * The default maximum is 20; a value of 1 allows a single level of nesting.
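+ * For example, with the minimum value of 1 a nested query may appear in a query but may not itself contain another nested query.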
+ */ + public static final Setting<Integer> MAX_NESTED_QUERY_DEPTH_SETTING = Setting.intSetting( + "index.query.max_nested_depth", + 20, + 1, + Property.Dynamic, + Property.IndexScope + ); /** * Index setting describing for NGramTokenizer and NGramTokenFilter * the maximum difference between @@ -658,6 +670,22 @@ public static IndexMergePolicy fromString(String text) { Property.Dynamic ); + public static final Setting<Boolean> INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( + "index.optimize_doc_id_lookup.fuzzy_set.enabled", + false, + Property.IndexScope, + Property.Dynamic + ); + + public static final Setting<Double> INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING = Setting.doubleSetting( + "index.optimize_doc_id_lookup.fuzzy_set.false_positive_probability", + DEFAULT_FALSE_POSITIVE_PROBABILITY, + 0.01, + 0.50, + Property.IndexScope, + Property.Dynamic + ); + public static final TimeValue DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL = new TimeValue(650, TimeUnit.MILLISECONDS); public static final TimeValue MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL = TimeValue.ZERO; public static final Setting<TimeValue> INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( @@ -747,6 +775,8 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile TimeValue searchIdleAfter; private volatile int maxAnalyzedOffset; private volatile int maxTermsCount; + + private volatile int maxNestedQueryDepth; private volatile String defaultPipeline; private volatile String requiredPipeline; private volatile boolean searchThrottled; @@ -787,6 +817,16 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile UnaryOperator<MergePolicy> mergeOnFlushPolicy; + /** + * Is fuzzy set enabled for doc id + */ + private volatile boolean enableFuzzySetForDocId; + + /** + * False positive probability to use while creating fuzzy set. + */ + private volatile double docIdFuzzySetFalsePositiveProbability; + /** * Returns the default search fields for this index. */ @@ -902,6 +942,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); + maxNestedQueryDepth = scopedSettings.get(MAX_NESTED_QUERY_DEPTH_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); @@ -926,6 +967,10 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. 
*/ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); + + setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); + setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); + scopedSettings.addSettingsUpdateConsumer( TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, tieredMergePolicyProvider::setNoCFSRatio @@ -1007,6 +1052,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); + scopedSettings.addSettingsUpdateConsumer(MAX_NESTED_QUERY_DEPTH_SETTING, this::setMaxNestedQueryDepth); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); @@ -1032,6 +1078,11 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this::setRemoteTranslogUploadBufferInterval ); scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); + scopedSettings.addSettingsUpdateConsumer(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING, this::setEnableFuzzySetForDocId); + scopedSettings.addSettingsUpdateConsumer( + INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, + this::setDocIdFuzzySetFalsePositiveProbability + ); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -1517,6 +1568,17 @@ private void setMaxTermsCount(int maxTermsCount) { this.maxTermsCount = maxTermsCount; } + /** + * @return max level of nested queries and documents + */ + public int getMaxNestedQueryDepth() { + return this.maxNestedQueryDepth; + } + + private void setMaxNestedQueryDepth(int maxNestedQueryDepth) { + this.maxNestedQueryDepth = maxNestedQueryDepth; + } + /** * Returns the maximum number of allowed script_fields to retrieve in a search request */ @@ -1801,4 +1863,20 @@ public void setDefaultSearchPipeline(String defaultSearchPipeline) { public boolean shouldWidenIndexSortType() { return this.widenIndexSortType; } + + public boolean isEnableFuzzySetForDocId() { + return enableFuzzySetForDocId; + } + + public void setEnableFuzzySetForDocId(boolean enableFuzzySetForDocId) { + this.enableFuzzySetForDocId = enableFuzzySetForDocId; + } + + public double getDocIdFuzzySetFalsePositiveProbability() { + return docIdFuzzySetFalsePositiveProbability; + } + + public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) { + this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; + } } diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index dc28ad2d6dc07..1ad17f121560c 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -39,10 +39,16 @@ import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import 
org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import java.util.Map; + /** * {@link PerFieldMappingPostingFormatCodec This postings format} is the default * {@link PostingsFormat} for OpenSearch. It utilizes the @@ -57,6 +63,8 @@ public class PerFieldMappingPostingFormatCodec extends Lucene99Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); + private final FuzzySetFactory fuzzySetFactory; + private PostingsFormat docIdPostingsFormat; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) @@ -67,6 +75,12 @@ public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService map super(compressionMode); this.mapperService = mapperService; this.logger = logger; + fuzzySetFactory = new FuzzySetFactory( + Map.of( + IdFieldMapper.NAME, + new FuzzySetParameters(() -> mapperService.getIndexSettings().getDocIdFuzzySetFalsePositiveProbability()) + ) + ); } @Override @@ -76,6 +90,11 @@ public PostingsFormat getPostingsFormatForField(String field) { logger.warn("no index mapper found for field: [{}] returning default postings format", field); } else if (fieldType instanceof CompletionFieldMapper.CompletionFieldType) { return CompletionFieldMapper.CompletionFieldType.postingsFormat(); + } else if (IdFieldMapper.NAME.equals(field) && mapperService.getIndexSettings().isEnableFuzzySetForDocId()) { + if (docIdPostingsFormat == null) { + docIdPostingsFormat = new FuzzyFilterPostingsFormat(super.getPostingsFormatForField(field), fuzzySetFactory); + } + return docIdPostingsFormat; + } return super.getPostingsFormatForField(field); } diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java new file mode 100644 index 0000000000000..09976297361fa --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.hash.T1ha1; + +import java.io.IOException; +import java.util.Iterator; + +/** + * Encapsulates common behaviour shared by fuzzy set implementations. + */ +public abstract class AbstractFuzzySet implements FuzzySet { + + /** + * Add an item to this fuzzy set. + * @param value The value to be added + */ + protected abstract void add(BytesRef value); + + /** + * Add all items to the underlying set. + * Implementations can choose to perform this using an optimized strategy based on the type of set. + * @param valuesIteratorProvider Supplier for an iterator over all values which should be added to the set.
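+ *                               A supplier is used (rather than a bare iterator) because implementations may need to iterate the values more than once, e.g. once to populate the set and once to verify membership in assertions.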
+ */ + protected void addAll(CheckedSupplier<Iterator<BytesRef>, IOException> valuesIteratorProvider) throws IOException { + Iterator<BytesRef> values = valuesIteratorProvider.get(); + while (values.hasNext()) { + add(values.next()); + } + } + + public Result contains(BytesRef val) { + return containsHash(generateKey(val)); + } + + protected abstract Result containsHash(long hash); + + protected long generateKey(BytesRef value) { + return T1ha1.hash(value.bytes, value.offset, value.length, 0L); + } + + protected void assertAllElementsExist(CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) throws IOException { + Iterator<BytesRef> iter = iteratorProvider.get(); + int cnt = 0; + while (iter.hasNext()) { + BytesRef item = iter.next(); + assert contains(item) == Result.MAYBE + : "Expected Filter to return positive response for elements added to it. Elements matched: " + cnt; + cnt++; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java new file mode 100644 index 0000000000000..b8a8352183ca8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.Assertions; + +import java.io.IOException; +import java.util.Iterator; + +/** + * The code is based on Lucene's implementation of Bloom Filter. + * It represents a subset of the Lucene implementation needed for OpenSearch use cases. + * Since the Lucene implementation is marked experimental, + * this aims to ensure we can provide a bwc implementation during upgrades. + */ +public class BloomFilter extends AbstractFuzzySet { + + private static final Logger logger = LogManager.getLogger(BloomFilter.class); + + // The sizes of BitSet used are all numbers that, when expressed in binary form, + // are all ones. This is to enable fast downsizing from one bitset to another + // by simply ANDing each set index in one bitset with the size of the target bitset + // - this provides a fast modulo of the number. Values previously accumulated in + // a large bitset and then mapped to a smaller set can be looked up using a single + // AND operation of the query term's hash rather than needing to perform a 2-step + // translation of the query term that mirrors the stored content's reprojections. 
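+ // For example, a set size of 63 (0b111111) maps a hash h to (h & 63), which for non-negative values equals h % 64, i.e. a single AND instead of an integer division.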
+ static final int[] usableBitSetSizes; + + static { + usableBitSetSizes = new int[26]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + usableBitSetSizes[i] = (1 << (i + 6)) - 1; + } + } + + private final LongArrayBackedBitSet bitset; + private final int setSize; + private final int hashCount; + + BloomFilter(long maxDocs, double maxFpp, CheckedSupplier<Iterator<BytesRef>, IOException> fieldIteratorProvider) throws IOException { + // Standard Bloom filter sizing: m = -n * ln(p) / (ln 2)^2 bits for n entries with false positive probability p + int setSize = (int) Math.ceil((maxDocs * Math.log(maxFpp)) / Math.log(1 / Math.pow(2, Math.log(2)))); + setSize = getNearestSetSize(setSize < Integer.MAX_VALUE / 2 ? 2 * setSize : Integer.MAX_VALUE); + // Optimal number of hash functions: k = (m / n) * ln 2 + int optimalK = (int) Math.round(((double) setSize / maxDocs) * Math.log(2)); + this.bitset = new LongArrayBackedBitSet(setSize); + this.setSize = setSize; + this.hashCount = optimalK; + addAll(fieldIteratorProvider); + if (Assertions.ENABLED) { + assertAllElementsExist(fieldIteratorProvider); + } + logger.debug("Bloom filter created with fpp: {}, setSize: {}, hashCount: {}", maxFpp, setSize, hashCount); + } + + BloomFilter(IndexInput in) throws IOException { + hashCount = in.readInt(); + setSize = in.readInt(); + this.bitset = new LongArrayBackedBitSet(in); + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeInt(hashCount); + out.writeInt(setSize); + bitset.writeTo(out); + } + + private static int getNearestSetSize(int maxNumberOfBits) { + assert maxNumberOfBits > 0 : "Provided size estimate for bloom filter is illegal (<=0) : " + maxNumberOfBits; + int result = usableBitSetSizes[0]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + if (usableBitSetSizes[i] <= maxNumberOfBits) { + result = usableBitSetSizes[i]; + } + } + return result; + } + + @Override + public SetType setType() { + return SetType.BLOOM_FILTER_V1; + } + + @Override + public Result containsHash(long hash) { + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + int bloomPos = (lsb + i * msb); + if (!mayContainValue(bloomPos)) { + return Result.NO; + } + } + return Result.MAYBE; + } + + protected void add(BytesRef value) { + long hash = generateKey(value); + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + // Bitmasking with setSize is effectively a modulo operation since set sizes are always of the form 2^n - 1 (an all-ones mask) + int bloomPos = (lsb + i * msb) & setSize; + bitset.set(bloomPos); + } + } + + @Override + public boolean isSaturated() { + long numBitsSet = bitset.cardinality(); + // Don't bother saving bitsets if >90% of bits are set - we don't want to + // throw any more memory at this problem.
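+ // (a nearly saturated filter answers MAYBE for most lookups, so keeping it would cost memory without saving seeks)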
+ return (float) numBitsSet / (float) setSize > 0.9f; + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.sizeOf(bitset.ramBytesUsed()); + } + + private boolean mayContainValue(int aHash) { + // Bloom sizes are always an all-ones bit pattern (2^n - 1) and so can be ANDed for a fast modulo + int pos = aHash & setSize; + return bitset.get(pos); + } + + @Override + public void close() throws IOException { + IOUtils.close(bitset); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java new file mode 100644 index 0000000000000..01f8054fc91be --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java @@ -0,0 +1,492 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * Based on Lucene's BloomFilterPostingsFormat. + * The discussion with the Lucene community that led to the decision to keep this implementation in OpenSearch + * is captured here: https://github.com/apache/lucene/issues/12986 + * + * The class deals with persisting the bloom filter through the postings format, + * and reading the field via a bloom-filter-fronted terms enum (to reduce disk seeks when requested values are absent). + * The class should be revisited during Lucene upgrades. There are bwc tests present to verify the format continues to work after an upgrade. + */ + +public final class FuzzyFilterPostingsFormat extends PostingsFormat { + + private static final Logger logger = LogManager.getLogger(FuzzyFilterPostingsFormat.class); + + /** + * This name is stored in headers. If changing the implementation for the format, this name/version should be updated + * so that reads can work as expected.
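+ * A format change would presumably bump {@code VERSION_CURRENT} and widen the version range accepted by {@code CodecUtil.checkIndexHeader} on read.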
+ */ + public static final String FUZZY_FILTER_CODEC_NAME = "FuzzyFilterCodec99"; + + public static final int VERSION_START = 0; + public static final int VERSION_CURRENT = VERSION_START; + + /** Extension of Fuzzy Filters file */ + public static final String FUZZY_FILTER_FILE_EXTENSION = "fzd"; + + private final PostingsFormat delegatePostingsFormat; + private final FuzzySetFactory fuzzySetFactory; + + public FuzzyFilterPostingsFormat(PostingsFormat delegatePostingsFormat, FuzzySetFactory fuzzySetFactory) { + super(FUZZY_FILTER_CODEC_NAME); + this.delegatePostingsFormat = delegatePostingsFormat; + this.fuzzySetFactory = fuzzySetFactory; + } + + // Needed for SPI + public FuzzyFilterPostingsFormat() { + this(null, null); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + if (delegatePostingsFormat == null) { + throw new UnsupportedOperationException( + "Error - " + getClass().getName() + " has been constructed without a choice of PostingsFormat" + ); + } + FieldsConsumer fieldsConsumer = delegatePostingsFormat.fieldsConsumer(state); + return new FuzzyFilteredFieldsConsumer(fieldsConsumer, state); + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new FuzzyFilteredFieldsProducer(state); + } + + static class FuzzyFilteredFieldsProducer extends FieldsProducer { + private FieldsProducer delegateFieldsProducer; + HashMap<String, FuzzySet> fuzzySetsByFieldName = new HashMap<>(); + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsProducer(SegmentReadState state) throws IOException { + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + IndexInput filterIn = null; + boolean success = false; + try { + // Using IndexInput directly instead of ChecksumIndexInput since we want to support RandomAccessInput + filterIn = state.directory.openInput(fuzzyFilterFileName, state.context); + + CodecUtil.checkIndexHeader( + filterIn, + FUZZY_FILTER_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + // Load the delegate postings format + PostingsFormat delegatePostingsFormat = PostingsFormat.forName(filterIn.readString()); + this.delegateFieldsProducer = delegatePostingsFormat.fieldsProducer(state); + int numFilters = filterIn.readInt(); + for (int i = 0; i < numFilters; i++) { + int fieldNum = filterIn.readInt(); + FuzzySet set = FuzzySetFactory.deserializeFuzzySet(filterIn); + closeables.add(set); + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNum); + fuzzySetsByFieldName.put(fieldInfo.name, set); + } + CodecUtil.retrieveChecksum(filterIn); + + // Verifies the checksum of the entire file up front; this could be made optional if it ever proves to be a performance issue + CodecUtil.checksumEntireFile(filterIn); + success = true; + closeables.add(filterIn); + } finally { + if (!success) { + IOUtils.closeWhileHandlingException(filterIn, delegateFieldsProducer); + } + } + } + + @Override + public Iterator<String> iterator() { + return delegateFieldsProducer.iterator(); + } + + @Override + public void close() throws IOException { + // Close the fuzzy sets and the filter input tracked in closeables before closing the delegate producer
IOUtils.closeWhileHandlingException(closeables); + delegateFieldsProducer.close(); + } + + @Override + public Terms terms(String field) throws IOException { + FuzzySet filter = fuzzySetsByFieldName.get(field); + if (filter == null) { + return delegateFieldsProducer.terms(field); + } else { + Terms result = delegateFieldsProducer.terms(field); + if (result == null) { + return null; + } + return new FuzzyFilteredTerms(result, filter); + } + } + + @Override + public int size() { + return delegateFieldsProducer.size(); + } + + static class FuzzyFilteredTerms extends Terms { + private Terms delegateTerms; + private FuzzySet filter; + + public FuzzyFilteredTerms(Terms terms, FuzzySet filter) { + this.delegateTerms = terms; + this.filter = filter; + } + + @Override + public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException { + return delegateTerms.intersect(compiled, startTerm); + } + + @Override + public TermsEnum iterator() throws IOException { + return new FilterAppliedTermsEnum(delegateTerms, filter); + } + + @Override + public long size() throws IOException { + return delegateTerms.size(); + } + + @Override + public long getSumTotalTermFreq() throws IOException { + return delegateTerms.getSumTotalTermFreq(); + } + + @Override + public long getSumDocFreq() throws IOException { + return delegateTerms.getSumDocFreq(); + } + + @Override + public int getDocCount() throws IOException { + return delegateTerms.getDocCount(); + } + + @Override + public boolean hasFreqs() { + return delegateTerms.hasFreqs(); + } + + @Override + public boolean hasOffsets() { + return delegateTerms.hasOffsets(); + } + + @Override + public boolean hasPositions() { + return delegateTerms.hasPositions(); + } + + @Override + public boolean hasPayloads() { + return delegateTerms.hasPayloads(); + } + + @Override + public BytesRef getMin() throws IOException { + return delegateTerms.getMin(); + } + + @Override + public BytesRef getMax() throws IOException { + return delegateTerms.getMax(); + } + } + + static final class FilterAppliedTermsEnum extends BaseTermsEnum { + + private Terms delegateTerms; + private TermsEnum delegateTermsEnum; + private final FuzzySet filter; + + public FilterAppliedTermsEnum(Terms delegateTerms, FuzzySet filter) throws IOException { + this.delegateTerms = delegateTerms; + this.filter = filter; + } + + void reset(Terms delegateTerms) throws IOException { + this.delegateTerms = delegateTerms; + this.delegateTermsEnum = null; + } + + private TermsEnum delegate() throws IOException { + if (delegateTermsEnum == null) { + /* pull the iterator only if we really need it - + * this can be a relatively heavy operation depending on the + * delegate postings format and the underlying directory + * (clone IndexInput) */ + delegateTermsEnum = delegateTerms.iterator(); + } + return delegateTermsEnum; + } + + @Override + public BytesRef next() throws IOException { + return delegate().next(); + } + + @Override + public boolean seekExact(BytesRef text) throws IOException { + // The magical fail-fast speed up that is the entire point of all of + // this code - save a disk seek if there is a match on an in-memory + // structure + // that may occasionally give a false positive but is guaranteed to give no false + // negatives + if (filter.contains(text) == FuzzySet.Result.NO) { + return false; + } + return delegate().seekExact(text); + } + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + return delegate().seekCeil(text); + } + + @Override + public void 
seekExact(long ord) throws IOException { + delegate().seekExact(ord); + } + + @Override + public BytesRef term() throws IOException { + return delegate().term(); + } + + @Override + public long ord() throws IOException { + return delegate().ord(); + } + + @Override + public int docFreq() throws IOException { + return delegate().docFreq(); + } + + @Override + public long totalTermFreq() throws IOException { + return delegate().totalTermFreq(); + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return delegate().postings(reuse, flags); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return delegate().impacts(flags); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(filter=" + filter.toString() + ")"; + } + } + + @Override + public void checkIntegrity() throws IOException { + delegateFieldsProducer.checkIntegrity(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(fields=" + fuzzySetsByFieldName.size() + ",delegate=" + delegateFieldsProducer + ")"; + } + } + + class FuzzyFilteredFieldsConsumer extends FieldsConsumer { + private FieldsConsumer delegateFieldsConsumer; + private Map<FieldInfo, FuzzySet> fuzzySets = new HashMap<>(); + private SegmentWriteState state; + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsConsumer(FieldsConsumer fieldsConsumer, SegmentWriteState state) { + this.delegateFieldsConsumer = fieldsConsumer; + this.state = state; + } + + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + + // Delegate must write first: it may have opened files + // on creating the class + // (e.g. Lucene41PostingsConsumer), and write() will + // close them; alternatively, if we delayed pulling + // the fields consumer until here, we could do it + // afterwards: + delegateFieldsConsumer.write(fields, norms); + + for (String field : fields) { + Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); + FuzzySet fuzzySet = fuzzySetFactory.createFuzzySet(state.segmentInfo.maxDoc(), fieldInfo.name, () -> iterator(terms)); + if (fuzzySet == null) { + break; + } + assert fuzzySets.containsKey(fieldInfo) == false; + closeables.add(fuzzySet); + fuzzySets.put(fieldInfo, fuzzySet); + } + } + + private Iterator<BytesRef> iterator(Terms terms) throws IOException { + TermsEnum termIterator = terms.iterator(); + return new Iterator<>() { + + private BytesRef currentTerm; + private PostingsEnum postingsEnum; + + @Override + public boolean hasNext() { + try { + do { + currentTerm = termIterator.next(); + if (currentTerm == null) { + return false; + } + postingsEnum = termIterator.postings(postingsEnum, 0); + if (postingsEnum.nextDoc() != PostingsEnum.NO_MORE_DOCS) { + return true; + } + } while (true); + } catch (IOException ex) { + throw new IllegalStateException("Cannot read terms: " + termIterator.attributes()); + } + } + + @Override + public BytesRef next() { + return currentTerm; + } + }; + } + + private boolean closed; + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + delegateFieldsConsumer.close(); + + // Now we are done accumulating values for these fields + List<Map.Entry<FieldInfo, FuzzySet>> nonSaturatedSets = new ArrayList<>(); + + for (Map.Entry<FieldInfo, FuzzySet> entry : fuzzySets.entrySet()) { + FuzzySet fuzzySet = 
entry.getValue(); + if (!fuzzySet.isSaturated()) { + nonSaturatedSets.add(entry); + } + } + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + try (IndexOutput fuzzyFilterFileOutput = state.directory.createOutput(fuzzyFilterFileName, state.context)) { + logger.trace( + "Writing fuzzy filter postings with version: {} for segment: {}", + VERSION_CURRENT, + state.segmentInfo.toString() + ); + CodecUtil.writeIndexHeader( + fuzzyFilterFileOutput, + FUZZY_FILTER_CODEC_NAME, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + + // remember the name of the postings format we will delegate to + fuzzyFilterFileOutput.writeString(delegatePostingsFormat.getName()); + + // First field in the output file is the number of fields+sets saved + fuzzyFilterFileOutput.writeInt(nonSaturatedSets.size()); + for (Map.Entry<FieldInfo, FuzzySet> entry : nonSaturatedSets) { + FieldInfo fieldInfo = entry.getKey(); + FuzzySet fuzzySet = entry.getValue(); + saveAppropriatelySizedFuzzySet(fuzzyFilterFileOutput, fuzzySet, fieldInfo); + } + CodecUtil.writeFooter(fuzzyFilterFileOutput); + } + // We are done with large bitsets so no need to keep them hanging around + fuzzySets.clear(); + IOUtils.closeWhileHandlingException(closeables); + } + + private void saveAppropriatelySizedFuzzySet(IndexOutput fileOutput, FuzzySet fuzzySet, FieldInfo fieldInfo) throws IOException { + fileOutput.writeInt(fieldInfo.number); + fileOutput.writeString(fuzzySet.setType().getSetName()); + fuzzySet.writeTo(fileOutput); + } + } + + @Override + public String toString() { + return "FuzzyFilterPostingsFormat(" + delegatePostingsFormat + ")"; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java new file mode 100644 index 0000000000000..df443ffbca33d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedFunction; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * Fuzzy Filter interface + */ +public interface FuzzySet extends Accountable, Closeable { + + /** + * Name used for a codec to be aware of what fuzzy set has been used. + */ + SetType setType(); + + /** + * @param value the item whose membership needs to be checked. + */ + Result contains(BytesRef value); + + boolean isSaturated(); + + void writeTo(DataOutput out) throws IOException; + + /** + * Enum to represent result of membership check on a fuzzy set. + */ + enum Result { + /** + * A definite no for the set membership of an item. + */ + NO, + + /** + * Fuzzy sets cannot guarantee whether a given item is present in the set or not, due to the data being stored in + * a lossy format (e.g. fingerprint, hash). + * Hence, we return a response denoting that the item may be present. + */ + MAYBE + } + + /** + * Enum to declare supported properties and mappings for a fuzzy set implementation.
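+ * Each constant carries the name persisted in the postings file and the deserializer used to read the set back from disk.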
+ */ + enum SetType { + BLOOM_FILTER_V1("bloom_filter_v1", BloomFilter::new, List.of("bloom_filter")); + + /** + * Name persisted in postings file. This will be used when reading to determine the bloom filter implementation. + */ + private final String setName; + + /** + * Interface for reading the actual fuzzy set implementation into a Java object. + */ + private final CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer; + + SetType(String setName, CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer, List<String> aliases) { + if (aliases.size() < 1) { + throw new IllegalArgumentException("Alias list is empty. Could not create Set Type: " + setName); + } + this.setName = setName; + this.deserializer = deserializer; + } + + public String getSetName() { + return setName; + } + + public CheckedFunction<IndexInput, ? extends FuzzySet, IOException> getDeserializer() { + return deserializer; + } + + public static SetType from(String name) { + for (SetType type : SetType.values()) { + if (type.setName.equals(name)) { + return type; + } + } + throw new IllegalArgumentException("There is no implementation for fuzzy set: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java new file mode 100644 index 0000000000000..5d1fd03f099d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; + +/** + * Factory class to create fuzzy sets. + * Supports bloom filters for now. More sets can be added as required.
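+ * A minimal usage sketch (the field name, probability, and {@code newTermsIterator()} helper are illustrative; the supplier should produce a fresh iterator on each call): + * <pre>{@code + * FuzzySetFactory factory = new FuzzySetFactory(Map.of("_id", new FuzzySetParameters(() -> 0.1d))); + * FuzzySet set = factory.createFuzzySet(maxDocs, "_id", () -> newTermsIterator()); + * }</pre>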
+ */ +public class FuzzySetFactory { + + private final Map<String, FuzzySetParameters> setTypeForField; + + public FuzzySetFactory(Map<String, FuzzySetParameters> setTypeForField) { + this.setTypeForField = setTypeForField; + } + + public FuzzySet createFuzzySet(int maxDocs, String fieldName, CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) + throws IOException { + FuzzySetParameters params = setTypeForField.get(fieldName); + if (params == null) { + throw new IllegalArgumentException("No fuzzy set defined for field: " + fieldName); + } + switch (params.getSetType()) { + case BLOOM_FILTER_V1: + return new BloomFilter(maxDocs, params.getFalsePositiveProbability(), iteratorProvider); + default: + throw new IllegalArgumentException("No Implementation for set type: " + params.getSetType()); + } + } + + public static FuzzySet deserializeFuzzySet(IndexInput in) throws IOException { + FuzzySet.SetType setType = FuzzySet.SetType.from(in.readString()); + return setType.getDeserializer().apply(in); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java new file mode 100644 index 0000000000000..7bb96e7c34f0b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import java.util.function.Supplier; + +/** + * Wrapper for params to create a fuzzy set. + */ +public class FuzzySetParameters { + private final Supplier<Double> falsePositiveProbabilityProvider; + private final FuzzySet.SetType setType; + + public static final double DEFAULT_FALSE_POSITIVE_PROBABILITY = 0.2047d; + + public FuzzySetParameters(Supplier<Double> falsePositiveProbabilityProvider) { + this.falsePositiveProbabilityProvider = falsePositiveProbabilityProvider; + this.setType = FuzzySet.SetType.BLOOM_FILTER_V1; + } + + public double getFalsePositiveProbability() { + return falsePositiveProbabilityProvider.get(); + } + + public FuzzySet.SetType getSetType() { + return setType; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java new file mode 100644 index 0000000000000..08d6059c1e82e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.OpenSearchException; +import org.opensearch.common.util.LongArray; + +import java.io.IOException; + +/** + * A Long array backed by RandomAccessInput. + * This implementation supports read operations only. 
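+ * Mutating operations ({@code set}, {@code increment}, {@code fill}) throw {@link UnsupportedOperationException}.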
+ */ +class IndexInputImmutableLongArray implements LongArray { + + private final RandomAccessInput input; + private final long size; + + IndexInputImmutableLongArray(long size, RandomAccessInput input) { + this.size = size; + this.input = input; + } + + @Override + public void close() {} + + @Override + public long size() { + return size; + } + + @Override + public synchronized long get(long index) { + try { + // Multiplying by 8 since each long is 8 bytes, and we need to get the long value at (index * 8) in the + // RandomAccessInput being accessed. + return input.readLong(index << 3); + } catch (IOException ex) { + throw new OpenSearchException(ex); + } + } + + @Override + public long set(long index, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long increment(long index, long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void fill(long fromIndex, long toIndex, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.shallowSizeOfInstance(IndexInputImmutableLongArray.class); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java new file mode 100644 index 0000000000000..bd4936aeec366 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.LongArray; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A bitset backed by a long-indexed array. + */ +class LongArrayBackedBitSet implements Accountable, Closeable { + + private long underlyingArrayLength = 0L; + private LongArray longArray; + + /** + * Constructor which uses an on-heap array. This should be used during the construction of the bitset. + * @param capacity The maximum capacity to provision for the bitset. + */ + LongArrayBackedBitSet(long capacity) { + // Since the bitset is backed by a long array, we only need 1 element for every 64 bits in the underlying array. + underlyingArrayLength = (capacity >> 6) + 1L; + this.longArray = BigArrays.NON_RECYCLING_INSTANCE.withCircuitBreaking().newLongArray(underlyingArrayLength); + } + + /** + * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. + * @param in IndexInput containing the serialized bitset. + * @throws IOException + */ + LongArrayBackedBitSet(IndexInput in) throws IOException { + underlyingArrayLength = in.readLong(); + // Multiplying by 8 since the length above counts longs, so we will have + // 8 times that many bytes in our stream.
+ long streamLength = underlyingArrayLength << 3; + this.longArray = new IndexInputImmutableLongArray(underlyingArrayLength, in.randomAccessSlice(in.getFilePointer(), streamLength)); + in.skipBytes(streamLength); + } + + public void writeTo(DataOutput out) throws IOException { + out.writeLong(underlyingArrayLength); + for (int idx = 0; idx < underlyingArrayLength; idx++) { + out.writeLong(longArray.get(idx)); + } + } + + /** + * This is an O(n) operation, and will iterate over all the elements in the underlying long array + * to determine cardinality of the set. + * @return number of set bits in the bitset. + */ + public long cardinality() { + long tot = 0; + for (int i = 0; i < underlyingArrayLength; ++i) { + tot += Long.bitCount(longArray.get(i)); + } + return tot; + } + + /** + * Retrieves whether the bit is set or not at the given index. + * @param index the index to look up for the bit + * @return true if bit is set, false otherwise + */ + public boolean get(long index) { + long i = index >> 6; // div 64 + long val = longArray.get(i); + long bitmask = 1L << index; + return (val & bitmask) != 0; + } + + /** + * Sets the bit at the given index. + * @param index the index to set the bit at. + */ + public void set(long index) { + long wordNum = index >> 6; // div 64 + long bitmask = 1L << index; + long val = longArray.get(wordNum); + longArray.set(wordNum, val | bitmask); + } + + @Override + public long ramBytesUsed() { + return 128L + longArray.ramBytesUsed(); + } + + @Override + public void close() throws IOException { + IOUtils.close(longArray); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java new file mode 100644 index 0000000000000..7aeac68cd192a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** classes responsible for handling all fuzzy codecs and operations */ +package org.opensearch.index.codec.fuzzy; diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java index 19454967f9ee3..a9cc24abe3c01 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -17,7 +17,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.function.BiConsumer; +import java.util.function.Consumer; /** * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of @@ -31,10 +31,10 @@ final class ReplicaFileTracker { public static final Logger logger = LogManager.getLogger(ReplicaFileTracker.class); private final Map<String, Integer> refCounts = new HashMap<>(); - private final BiConsumer<String, String> fileDeleter; + private final Consumer<String> fileDeleter; private final Set<String> EXCLUDE_FILES = Set.of("write.lock"); - public ReplicaFileTracker(BiConsumer<String, String> fileDeleter) { + public ReplicaFileTracker(Consumer<String> fileDeleter) { this.fileDeleter = fileDeleter; } @@ -82,7 +82,7 @@ private synchronized void delete(Collection<String> toDelete) { private synchronized void delete(String fileName) { assert canDelete(fileName); - fileDeleter.accept("delete unreferenced", fileName); + fileDeleter.accept(fileName); } private synchronized boolean canDelete(String fileName) { diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java index d4a97f0267222..34aecfc62b8b2 100644 --- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java @@ -41,6 +41,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.ReplicationStats; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; import org.opensearch.index.remote.RemoteSegmentStats; import java.io.IOException; @@ -95,7 +96,8 @@ public class SegmentsStats implements Writeable, ToXContentFragment { Map.entry("tvx", "Term Vector Index"), Map.entry("tvd", "Term Vector Documents"), Map.entry("tvf", "Term Vector Fields"), - Map.entry("liv", "Live Documents") + Map.entry("liv", "Live Documents"), + Map.entry(FuzzyFilterPostingsFormat.FUZZY_FILTER_FILE_EXTENSION, "Fuzzy Filter") ); public SegmentsStats() { diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index c1f69d1ef3638..dea389bb6a0ff 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -90,6 +90,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo( @@ -108,6 +109,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo( @@ -126,6 +128,7 @@ public final class TranslogLeafReader extends LeafReader { 0, 
VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); public static Set<String> ALL_FIELD_NAMES = Sets.newHashSet(FAKE_SOURCE_FIELD.name, FAKE_ROUTING_FIELD.name, FAKE_ID_FIELD.name); diff --git a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java index a2a70e280187a..3a2504ce92158 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java @@ -43,6 +43,9 @@ * aggregations, which only use {@link #advanceExact(int)} and * {@link #longValue()}. * + * When optimizations based on point values are used, {@link #advance(int)} + * and, optionally, {@link #cost()} have to be implemented as well. + * * @opensearch.internal */ public abstract class AbstractNumericDocValues extends NumericDocValues { diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java index e09de53dc05f7..6db6bbccacae5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Numbers; import org.opensearch.common.geo.GeoPoint; @@ -76,6 +77,10 @@ public double doubleValue() throws IOException { throw new UnsupportedOperationException(); } + @Override + public int advance(int target) throws IOException { + return DocIdSetIterator.NO_MORE_DOCS; + } }; } @@ -561,6 +566,10 @@ public boolean advanceExact(int doc) throws IOException { return values.advanceExact(doc); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -591,6 +600,10 @@ public int docValueCount() { return values.docValueCount(); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -622,6 +635,12 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + docID = values.advance(target); + return docID; + } } /** @@ -683,6 +702,11 @@ public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -715,6 +739,11 @@ public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value.longValue(); } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -742,6 +771,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java index b0f3400acfb3d..f69cfacaf35d4 100644 --- 
a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java @@ -71,6 +71,11 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } @@ -95,6 +100,23 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } + + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java index 4ee494ffb30aa..816445bb319f1 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java @@ -69,4 +69,8 @@ public double nextValue() throws IOException { return in.doubleValue(); } + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java index 39aca38c331ea..e2739e462dea5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java @@ -74,4 +74,9 @@ public NumericDoubleValues getDoubleValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } + } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java index 150e114d342de..98a44c246f654 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java @@ -67,4 +67,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java index 1bae845c9b0d2..279a78ac51adf 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java +++ 
b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java @@ -72,4 +72,8 @@ public SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java index dce1aff9cc94f..be9064751b5f0 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java @@ -70,4 +70,15 @@ protected SortedNumericDoubleValues() {} */ public abstract int docValueCount(); + /** + * Advances to the first document beyond the current one whose document number is greater than or equal to + * <i>target</i>, and returns that document number. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when the point values optimization kicks + * in, and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java index 8d17146760d9e..d9e9dd6a293fd 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java @@ -42,4 +42,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java index 90b49e19a8954..63c7e6162cc55 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java @@ -47,4 +47,8 @@ public SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java index b70752df9e826..0019a41e67c02 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java @@ -336,6 +336,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -364,6 +369,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public
int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -434,6 +444,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -462,6 +477,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index 831bb8aff3be3..d4eeb8aae8e24 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -331,6 +331,7 @@ private GetResult innerGetLoadFromStoredFields( 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); StoredFieldVisitor.Status status = fieldVisitor.needsField(fieldInfo); diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index 2a677d8bc1352..db8da8a949d6f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -36,7 +36,9 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -222,25 +224,48 @@ protected Object parseSourceValue(Object value) { @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); + Query query; if (value instanceof InetAddress) { - return InetAddressPoint.newExactQuery(name(), (InetAddress) value); + query = InetAddressPoint.newExactQuery(name(), (InetAddress) value); } else { if (value instanceof BytesRef) { value = ((BytesRef) value).utf8ToString(); } + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + query = InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } else { + InetAddress address = InetAddresses.forString(term); + query = InetAddressPoint.newExactQuery(name(), address); + } + } + if (isSearchable() && hasDocValues()) { + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ); + } + if (hasDocValues()) { String term = value.toString(); if (term.contains("/")) { final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); } - InetAddress address = InetAddresses.forString(term); - return InetAddressPoint.newExactQuery(name(), address); + return 
SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())); } + return query; } @Override public Query termsQuery(List<?> values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); InetAddress[] addresses = new InetAddress[values.size()]; int i = 0; for (Object value : values) { @@ -265,14 +290,32 @@ public Query termsQuery(List<?> values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - return rangeQuery( - lowerTerm, - upperTerm, - includeLower, - includeUpper, - (lower, upper) -> InetAddressPoint.newRangeQuery(name(), lower, upper) - ); + failIfNotIndexedAndNoDocValues(); + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (lower, upper) -> { + Query query = InetAddressPoint.newRangeQuery(name(), lower, upper); + if (isSearchable() && hasDocValues()) { + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ); + } + if (hasDocValues()) { + return SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ); + } + return query; + }); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 9b8fa7eec37b9..fc8654216e187 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -237,6 +237,7 @@ public MapperService( ScriptService scriptService ) { super(indexSettings); + this.indexVersionCreated = indexSettings.getIndexVersionCreated(); this.indexAnalyzers = indexAnalyzers; this.documentParser = new DocumentMapperParser( @@ -261,7 +262,12 @@ public MapperService( this.idFieldDataEnabled = idFieldDataEnabled; if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { - throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); + deprecationLogger.deprecate( + index().getName() + INDEX_MAPPER_DYNAMIC_SETTING.getKey(), + "Index [{}] has setting [{}] that is not supported in OpenSearch, its value will be ignored.", + index().getName(), + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + ); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 524d2b0e0dd38..eb3a99b0e0388 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -37,7 +37,6 @@ import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; -import org.apache.lucene.document.FloatField; import org.apache.lucene.document.FloatPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; @@ -372,7 +371,7 @@ public Query termsQuery(String field, List<Object> values, boolean hasDocValues, if (hasDocValues) { return SortedNumericDocValuesField.newSlowSetQuery(field, points); } - return 
FloatField.newSetQuery(field, v); + return FloatPoint.newSetQuery(field, v); } @Override diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index ac4fde7f06b16..3f97b3918a126 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -322,10 +322,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { try { context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); - innerQuery = this.query.toQuery(context); + try { + innerQuery = this.query.toQuery(context); + } finally { + context.nestedScope().previousLevel(); + } } finally { context.setParentFilter(previousParentFilter); - context.nestedScope().previousLevel(); } // ToParentBlockJoinQuery requires that the inner query only matches documents diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index eba3ed076e82b..f3b392559d33e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -256,7 +256,7 @@ private QueryShardContext( this.bitsetFilterCache = bitsetFilterCache; this.indexFieldDataService = indexFieldDataLookup; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); this.scriptService = scriptService; this.indexSettings = indexSettings; this.searcher = searcher; @@ -270,7 +270,7 @@ private void reset() { allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.lookup = null; this.namedQueries.clear(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); } public IndexAnalyzers getIndexAnalyzers() { @@ -423,7 +423,8 @@ public SearchLookup lookup() { if (this.lookup == null) { this.lookup = new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } return this.lookup; @@ -439,7 +440,8 @@ public SearchLookup newFetchLookup() { */ return new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java index cfc44d4434d3b..1c693f9761240 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java @@ -560,6 +560,11 @@ public boolean needsScores() { protected NumericDoubleValues distance(LeafReaderContext context) { final SortedNumericDoubleValues doubleValues = fieldData.load(context).getDoubleValues(); return FieldData.replaceMissing(mode.select(new SortingNumericDoubleValues() { + @Override + public int advance(int target) throws IOException 
{ + return doubleValues.advance(target); + } + @Override public boolean advanceExact(int docId) throws IOException { if (doubleValues.advanceExact(docId)) { diff --git a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java index 51abe389ad686..488768c32d17f 100644 --- a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java +++ b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java @@ -33,6 +33,7 @@ package org.opensearch.index.query.support; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ObjectMapper; import java.util.Deque; @@ -47,6 +48,11 @@ public final class NestedScope { private final Deque<ObjectMapper> levelStack = new LinkedList<>(); + private final IndexSettings indexSettings; + + public NestedScope(IndexSettings indexSettings) { + this.indexSettings = indexSettings; + } /** * @return For the current nested level returns the object mapper that belongs to that @@ -60,7 +66,21 @@ public ObjectMapper getObjectMapper() { */ public ObjectMapper nextLevel(ObjectMapper level) { ObjectMapper previous = levelStack.peek(); - levelStack.push(level); + if (levelStack.size() < indexSettings.getMaxNestedQueryDepth()) { + levelStack.push(level); + } else { + throw new IllegalArgumentException( + "The depth of the nested query [" + + (levelStack.size() + 1) + + "] has exceeded " + + "the allowed maximum of [" + + indexSettings.getMaxNestedQueryDepth() + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING.getKey() + + "] index level setting." + ); + } return previous; } diff --git a/server/src/main/java/org/opensearch/index/reindex/RetryListener.java b/server/src/main/java/org/opensearch/index/reindex/RetryListener.java index 2b092539555e6..e46e675977268 100644 --- a/server/src/main/java/org/opensearch/index/reindex/RetryListener.java +++ b/server/src/main/java/org/opensearch/index/reindex/RetryListener.java @@ -47,7 +47,7 @@ * * @opensearch.internal */ -class RetryListener implements RejectAwareActionListener<ScrollableHitSource.Response> { +public class RetryListener implements RejectAwareActionListener<ScrollableHitSource.Response> { private final Logger logger; private final Iterator<TimeValue> retries; private final ThreadPool threadPool; @@ -55,7 +55,7 @@ class RetryListener implements RejectAwareActionListener<ScrollableHitSource.Res private final ActionListener<ScrollableHitSource.Response> delegate; private int retryCount = 0; - RetryListener( + public RetryListener( Logger logger, ThreadPool threadPool, BackoffPolicy backoffPolicy, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index fe9440813b94f..f1843ea3eef38 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -260,7 +260,7 @@ public long getRefreshSeqNoLag() { } public long getTimeMsLag() { - if (remoteRefreshTimeMs == localRefreshTimeMs) { + if (remoteRefreshTimeMs == localRefreshTimeMs || bytesLag == 0) { return 0; } return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs; @@ -301,12 +301,17 @@ public Map<String, Long> getLatestLocalFileNameLengthMap() { } /** - * Updates the
latestLocalFileNameLengthMap by adding file name and its size to the map. The method is given a function as an argument which is used for determining the file size (length in bytes). This method is also provided the collection of segment files which are the latest refresh local segment files. This method also removes the stale segment files from the map that are not part of the input segment files. + * Updates the latestLocalFileNameLengthMap by adding file name and its size to the map. + * The method is given a function as an argument which is used for determining the file size (length in bytes). + * This method is also provided the collection of segment files which are the latest refresh local segment files. + * This method also removes the stale segment files from the map that are not part of the input segment files. * * @param segmentFiles list of local refreshed segment files * @param fileSizeFunction function used to determine the file size in bytes + * + * @return updated map of local segment files and their sizes */ - public void updateLatestLocalFileNameLengthMap( + public Map<String, Long> updateLatestLocalFileNameLengthMap( Collection<String> segmentFiles, CheckedFunction<String, Long, IOException> fileSizeFunction ) { @@ -332,6 +337,7 @@ public void updateLatestLocalFileNameLengthMap( // Remove keys from the fileSizeMap that do not exist in the latest segment files latestLocalFileNameLengthMap.entrySet().removeIf(entry -> fileSet.contains(entry.getKey()) == false); computeBytesLag(); + return Collections.unmodifiableMap(latestLocalFileNameLengthMap); } public void addToLatestUploadedFiles(String file) { diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index b13f1eb04a941..576e00f8f30d1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -38,7 +38,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -466,12 +465,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.QUERY_TIME_IN_MILLIS, Fields.QUERY_TIME, getQueryTime()); builder.field(Fields.QUERY_CURRENT, queryCurrent); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); - builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); - builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); - builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); - } + builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); + builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); + builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); + builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); builder.field(Fields.FETCH_TOTAL, fetchCount); builder.humanReadableField(Fields.FETCH_TIME_IN_MILLIS, Fields.FETCH_TIME, getFetchTime());
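Returning to the NestedScope change above: a short, hedged sketch of how nested query depth accumulates through the stock OpenSearch query builders. The field names are hypothetical, and the setting key behind IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING is assumed (commonly index.query.max_nested_depth), not confirmed by this diff:

```java
// Each enclosing nestedQuery pushes one level onto NestedScope during
// query rewriting, so an over-deep chain now fails fast with the
// IllegalArgumentException added above instead of recursing unchecked.
import org.apache.lucene.search.join.ScoreMode;
import org.opensearch.index.query.NestedQueryBuilder;
import org.opensearch.index.query.QueryBuilders;

public class NestedDepthExample {
    public static NestedQueryBuilder twoLevels() {
        // Inner nested query: one level of depth on NestedScope's stack.
        NestedQueryBuilder inner = QueryBuilders.nestedQuery(
            "driver.vehicle",                                         // hypothetical nested path
            QueryBuilders.termQuery("driver.vehicle.make", "powell"), // hypothetical field/value
            ScoreMode.Avg
        );
        // The enclosing nested query adds a second level; exceeding the
        // per-index maximum now throws at query-build time.
        return QueryBuilders.nestedQuery("driver", inner, ScoreMode.Avg);
    }
}
```

diff --git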
a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index cbb246219546b..977155a1cbb72 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1697,7 +1697,8 @@ ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) th } final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() - && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()) { + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration() + && latestReplicationCheckpoint.getPrimaryTerm() == getOperationPrimaryTerm()) { return latestReplicationCheckpoint; } final Map<String, StoreFileMetadata> metadataMap = store.getSegmentMetadataMap(segmentInfos); @@ -2014,7 +2015,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO /* ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003 */ - private RemoteSegmentStoreDirectory getRemoteDirectory() { + public RemoteSegmentStoreDirectory getRemoteDirectory() { assert indexSettings.isRemoteStoreEnabled(); assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); @@ -2024,23 +2025,35 @@ private RemoteSegmentStoreDirectory getRemoteDirectory() { } /** - Returns true iff it is able to verify that remote segment store - is in sync with local + * Returns true iff it is able to verify that remote segment store + * is in sync with local */ boolean isRemoteSegmentStoreInSync() { assert indexSettings.isRemoteStoreEnabled(); try { RemoteSegmentStoreDirectory directory = getRemoteDirectory(); if (directory.readLatestMetadataFile() != null) { - // verifying that all files except EXCLUDE_FILES are uploaded to the remote Collection<String> uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet(); - SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); - Collection<String> localFiles = segmentInfos.files(true); - if (uploadFiles.containsAll(localFiles)) { - return true; + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = getSegmentInfosSnapshot()) { + Collection<String> localSegmentInfosFiles = segmentInfosGatedCloseable.get().files(true); + Set<String> localFiles = new HashSet<>(localSegmentInfosFiles); + // verifying that all files except EXCLUDE_FILES are uploaded to the remote + localFiles.removeAll(RemoteStoreRefreshListener.EXCLUDE_FILES); + if (uploadFiles.containsAll(localFiles)) { + return true; + } + logger.debug( + () -> new ParameterizedMessage( + "RemoteSegmentStoreSyncStatus localSize={} remoteSize={}", + localFiles.size(), + uploadFiles.size() + ) + ); } } - } catch (IOException e) { + } catch (AlreadyClosedException e) { + throw e; + } catch (Throwable e) { logger.error("Exception while reading latest metadata", e); } return false; diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index d96a7e7c95ecf..7bb80b736693f 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ 
b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -41,6 +41,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -171,13 +172,33 @@ private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { // When the shouldSync is called the first time, then 1st condition on primary term is true. But after that // we update the primary term and the same condition would not evaluate to true again in syncSegments. // Below check ensures that if there is commit, then that gets picked up by both 1st and 2nd shouldSync call. - || isRefreshAfterCommitSafe(); + || isRefreshAfterCommitSafe() + || isRemoteSegmentStoreInSync() == false; if (shouldSync || skipPrimaryTermCheck) { return shouldSync; } return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } + /** + * Checks if all files present in the local store are uploaded to the remote store or are part of the excluded files. + * + * Different from IndexShard#isRemoteSegmentStoreInSync, as + * it uses the files-uploaded cache in RemoteDirectory and doesn't make a remote store call. + * It also doesn't throw an exception if the store gets closed, since the store is expected to be open. + * + * @return true iff all the local files are uploaded to the remote store. + */ + boolean isRemoteSegmentStoreInSync() { + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + return segmentInfosGatedCloseable.get().files(true).stream().allMatch(this::skipUpload); + } catch (Throwable throwable) { + logger.error("Throwable thrown during isRemoteSegmentStoreInSync", throwable); + } + return false; + } + /* @return false if retry is needed */ @@ -209,13 +230,25 @@ private boolean syncSegments() { try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); + if (checkpoint.getPrimaryTerm() != indexShard.getOperationPrimaryTerm()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "primaryTerm mismatch during segments upload to remote store [%s] != [%s]", + checkpoint.getPrimaryTerm(), + indexShard.getOperationPrimaryTerm() + ) + ); + } // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move.
long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); Collection<String> localSegmentsPostRefresh = segmentInfos.files(true); // Create a map of file name to size and update the refresh segment tracker - updateLocalSizeMapAndTracker(localSegmentsPostRefresh); + Map<String, Long> localSegmentsSizeMap = updateLocalSizeMapAndTracker(localSegmentsPostRefresh).entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); CountDownLatch latch = new CountDownLatch(1); ActionListener<Void> segmentUploadsCompletedListener = new LatchedActionListener<>(new ActionListener<>() { @Override @@ -231,6 +264,7 @@ public void onResponse(Void unused) { refreshClockTimeMs, refreshSeqNo, lastRefreshedCheckpoint, + localSegmentsSizeMap, checkpoint ); // At this point since we have uploaded new segments, segment infos and segment metadata file, @@ -251,7 +285,7 @@ public void onFailure(Exception e) { }, latch); // Start the segments files upload - uploadNewSegments(localSegmentsPostRefresh, segmentUploadsCompletedListener); + uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); latch.await(); } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); @@ -295,10 +329,11 @@ private void onSuccessfulSegmentsSync( long refreshClockTimeMs, long refreshSeqNo, long lastRefreshedCheckpoint, + Map<String, Long> localFileSizeMap, ReplicationCheckpoint checkpoint ) { // Update latest uploaded segment files name in segment tracker - segmentTracker.setLatestUploadedFiles(segmentTracker.getLatestLocalFileNameLengthMap().keySet()); + segmentTracker.setLatestUploadedFiles(localFileSizeMap.keySet()); // Update the remote refresh time and refresh seq no updateRemoteRefreshTimeAndSeqNo(refreshTimeMs, refreshClockTimeMs, refreshSeqNo); // Reset the backoffDelayIterator for the future failures @@ -371,7 +406,11 @@ void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos se } } - private void uploadNewSegments(Collection<String> localSegmentsPostRefresh, ActionListener<Void> listener) { + private void uploadNewSegments( + Collection<String> localSegmentsPostRefresh, + Map<String, Long> localSegmentsSizeMap, + ActionListener<Void> listener + ) { Collection<String> filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { logger.debug("No new segments to upload in uploadNewSegments"); @@ -385,7 +424,7 @@ private void uploadNewSegments(Collection<String> localSegmentsPostRefresh, Acti for (String src : filteredFiles) { // Initializing listener here to ensure that the stats increment operations are thread-safe - UploadListener statsListener = createUploadListener(); + UploadListener statsListener = createUploadListener(localSegmentsSizeMap); ActionListener<Void> aggregatedListener = ActionListener.wrap(resp -> { statsListener.onSuccess(src); batchUploadListener.onResponse(resp); @@ -444,9 +483,11 @@ private void updateRemoteRefreshTimeAndSeqNo(long refreshTimeMs, long refreshClo * Updates map of file name to size of the input segment files in the segment tracker. Uses {@code storeDirectory.fileLength(file)} to get the size. * * @param segmentFiles list of segment files that are part of the most recent local refresh. 
+ * + * @return updated map of local segment files and their sizes */ - private void updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { - segmentTracker.updateLatestLocalFileNameLengthMap(segmentFiles, storeDirectory::fileLength); + private Map<String, Long> updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { + return segmentTracker.updateLatestLocalFileNameLengthMap(segmentFiles, storeDirectory::fileLength); } private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesBeforeUpload, long startTimeInNS) { @@ -521,22 +562,24 @@ private boolean isLocalOrSnapshotRecovery() { /** * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events. + * + * @param fileSizeMap updated map of the current snapshot of local segment files to their sizes */ - private UploadListener createUploadListener() { + private UploadListener createUploadListener(Map<String, Long> fileSizeMap) { return new UploadListener() { private long uploadStartTime = 0; @Override public void beforeUpload(String file) { // Start tracking the upload bytes started - segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesStarted(fileSizeMap.get(file)); uploadStartTime = System.currentTimeMillis(); } @Override public void onSuccess(String file) { // Track upload success - segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesSucceeded(fileSizeMap.get(file)); segmentTracker.addToLatestUploadedFiles(file); segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); } @@ -544,7 +587,7 @@ public void onSuccess(String file) { @Override public void onFailure(String file) { // Track upload failure - segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadBytesFailed(fileSizeMap.get(file)); segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); } }; diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 5b1940bb1d9a5..3faef2da05320 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -38,11 +38,13 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -191,7 +193,8 @@ void recoverFromLocalShards(
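The createUploadListener(fileSizeMap) rework above pins each upload callback to an immutable snapshot of file sizes taken when the refresh was processed, rather than reading the live tracker map. A self-contained sketch of the same pattern; the UploadListener shape is copied from the hunk above, everything else is hypothetical:

```java
// Stats callbacks read file sizes from a snapshot captured up front, so
// concurrent updates to the live tracker map cannot skew upload accounting.
import java.util.Map;

interface UploadListener {            // shape mirrored from the diff above
    void beforeUpload(String file);
    void onSuccess(String file);
    void onFailure(String file);
}

final class SnapshotStatsListener implements UploadListener {
    private final Map<String, Long> fileSizeMap; // immutable snapshot
    private long startMillis;

    SnapshotStatsListener(Map<String, Long> fileSizeMap) {
        this.fileSizeMap = Map.copyOf(fileSizeMap); // defensive copy
    }

    @Override
    public void beforeUpload(String file) {
        startMillis = System.currentTimeMillis();
    }

    @Override
    public void onSuccess(String file) {
        long bytes = fileSizeMap.getOrDefault(file, 0L);
        long tookMillis = Math.max(1, System.currentTimeMillis() - startMillis);
        // a real implementation would feed bytes/tookMillis into the tracker here
    }

    @Override
    public void onFailure(String file) {
        long bytes = fileSizeMap.getOrDefault(file, 0L);
        // a real implementation would record the failed bytes here
    }
}
```

// just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc.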
indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { throw new IndexShardRecoveryException( indexShard.shardId(), @@ -432,7 +435,8 @@ void recoverFromSnapshotAndRemoteStore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -717,7 +721,8 @@ private void restore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -791,4 +796,31 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO ); store.associateIndexWithNewTranslog(translogUUID); } + + /* + Blocks the calling thread, waiting for the remote store to sync, up to the internal remote upload timeout. + */ + private void waitForRemoteStoreSync(IndexShard indexShard) { + if (indexShard.shardRouting.primary() == false) { + return; + } + long startNanos = System.nanoTime(); + + while (System.nanoTime() - startNanos < indexShard.getRecoverySettings().internalRemoteUploadTimeout().nanos()) { + try { + if (indexShard.isRemoteSegmentStoreInSync()) { + break; + } else { + try { + Thread.sleep(TimeValue.timeValueMinutes(1).millis()); + } catch (InterruptedException ie) { + throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie); + } + } + } catch (AlreadyClosedException e) { + // There is no point in waiting, as the shard is now closed.
+ return; + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 9c1e902606cab..c9a238c6e3350 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -45,6 +45,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -164,7 +165,7 @@ public RemoteSegmentMetadata init() throws IOException { */ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration, String acquirerId) throws IOException { String metadataFilePrefix = MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, commitGeneration); - String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLock(metadataFilePrefix, acquirerId); + String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFile(metadataFilePrefix, acquirerId); RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); @@ -717,10 +718,49 @@ public Map<String, UploadedSegmentMetadata> getSegmentsUploadedToRemoteStore() { return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); } + // Visible for testing + Set<String> getMetadataFilesToFilterActiveSegments( + final int lastNMetadataFilesToKeep, + final List<String> sortedMetadataFiles, + final Set<String> lockedMetadataFiles + ) { + // the idea here is for each deletable md file, we can consider the segments present in non-deletable md file + // before this and non-deletable md file after this to compute the active segment files. + // For ex: + // lastNMetadataFilesToKeep = 3 + // sortedMetadataFiles = [m1, m2, m3, m4, m5, m6(locked), m7(locked), m8(locked), m9(locked), m10] + // lockedMetadataFiles = m6, m7, m8, m9 + // then the returned set will be (m3, m6, m9) + final Set<String> metadataFilesToFilterActiveSegments = new HashSet<>(); + for (int idx = lastNMetadataFilesToKeep; idx < sortedMetadataFiles.size(); idx++) { + if (lockedMetadataFiles.contains(sortedMetadataFiles.get(idx)) == false) { + String prevMetadata = (idx - 1) >= 0 ? sortedMetadataFiles.get(idx - 1) : null; + String nextMetadata = (idx + 1) < sortedMetadataFiles.size() ? sortedMetadataFiles.get(idx + 1) : null; + + if (prevMetadata != null && (lockedMetadataFiles.contains(prevMetadata) || idx == lastNMetadataFilesToKeep)) { + // if previous metadata of deletable md is locked, add it to md files for active segments. + metadataFilesToFilterActiveSegments.add(prevMetadata); + } + if (nextMetadata != null && lockedMetadataFiles.contains(nextMetadata)) { + // if next metadata of deletable md is locked, add it to md files for active segments. + metadataFilesToFilterActiveSegments.add(nextMetadata); + } + } + } + return metadataFilesToFilterActiveSegments; + } + /** * Delete stale segment and metadata files * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store, - * we just need to read the latest metadata file. All the stale metadata files can be safely deleted. 
+ * we just need to read the latest metadata file. + * Assumptions: + * (1) if a segment file is not present in a md file, it will never be present in any md file created after that, and + * (2) if (md1, md2, md3) are in sorted order, it is not possible that a segment file will be in md1 and md3 but not in md2. + * <p> + * for each deletable md file, segments present in non-deletable md file before this and non-deletable md file + * after this are sufficient to compute the list of active or non-deletable segment files referenced by a deletable + * md file * * @param lastNMetadataFilesToKeep number of metadata files to keep * @throws IOException in case of I/O error while reading from / writing to remote segment store @@ -745,35 +785,36 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List<String> metadataFilesEligibleToDelete = sortedMetadataFileList.subList( - lastNMetadataFilesToKeep, - sortedMetadataFileList.size() + List<String> metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) ); - List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream().filter(metadataFile -> { - try { - return !isLockAcquired(metadataFile); - } catch (IOException e) { - logger.error( - "skipping metadata file (" - + metadataFile - + ") deletion for this run," - + " as checking lock for metadata is failing with error: " - + e - ); - return false; - } - }).collect(Collectors.toList()); + Set<String> allLockFiles; + try { + allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + } catch (Exception e) { + logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); + return; + } + List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() + .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) + .collect(Collectors.toList()); - sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); logger.debug( "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", metadataFilesEligibleToDelete, - metadataFilesEligibleToDelete + metadataFilesToBeDeleted ); Map<String, UploadedSegmentMetadata> activeSegmentFilesMetadataMap = new HashMap<>(); Set<String> activeSegmentRemoteFilenames = new HashSet<>(); - for (String metadataFile : sortedMetadataFileList) { + + final Set<String> metadataFilesToFilterActiveSegments = getMetadataFilesToFilterActiveSegments( + lastNMetadataFilesToKeep, + sortedMetadataFileList, + allLockFiles + ); + + for (String metadataFile : metadataFilesToFilterActiveSegments) { Map<String, UploadedSegmentMetadata> segmentMetadataMap = readMetadataFile(metadataFile).getMetadata(); activeSegmentFilesMetadataMap.putAll(segmentMetadataMap); activeSegmentRemoteFilenames.addAll( @@ -852,6 +893,25 @@ public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep, ActionListene } } + public static void remoteDirectoryCleanup( + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, + String remoteStoreRepoForIndex, + String indexUUID, + ShardId shardId + ) { + try { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); + remoteSegmentStoreDirectory.deleteStaleSegments(0); + remoteSegmentStoreDirectory.deleteIfEmpty(); + } catch (Exception e) { + staticLogger.error("Exception 
occurred while deleting directory", e); + } + } + /* Tries to delete shard level directory if it is empty Return true if it deleted it successfully @@ -874,7 +934,6 @@ private boolean deleteIfEmpty() throws IOException { logger.error("Exception occurred while deleting directory", e); return false; } - return true; } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index a5e89ec6a8327..eca8d9ec702e1 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -76,4 +76,5 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } + } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index 5ebd00f59ef49..9c29e03c225e4 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -21,6 +21,7 @@ import java.util.Collection; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; /** @@ -75,7 +76,7 @@ public void release(LockInfo lockInfo) throws IOException { } } - public String fetchLock(String filenamePrefix, String acquirerId) throws IOException { + public String fetchLockedMetadataFile(String filenamePrefix, String acquirerId) throws IOException { Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); List<String> lockFilesForAcquirer = lockFiles.stream() .filter(lockFile -> acquirerId.equals(FileLockInfo.LockFileUtils.getAcquirerIdFromLock(lockFile))) @@ -88,6 +89,11 @@ public String fetchLock(String filenamePrefix, String acquirerId) throws IOExcep return lockFilesForAcquirer.get(0); } + public Set<String> fetchLockedMetadataFiles(String filenamePrefix) throws IOException { + Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + return lockFiles.stream().map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock).collect(Collectors.toSet()); + } + /** * Checks whether a given file have any lock on it or not. * @param lockInfo File Lock Info instance for which we need to check if lock is acquired. diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java index 7166e9aa482e3..8097fd08da50a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java @@ -15,6 +15,8 @@ import org.opensearch.index.store.remote.utils.TransferManager; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; /** * This is an implementation of {@link OnDemandBlockIndexInput} where this class provides the main IndexInput using shard snapshot files. 
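Before the block-fetch changes below, a standalone check of the stale-metadata selection introduced in getMetadataFilesToFilterActiveSegments above. This is a re-implementation of the logic from that hunk, kept runnable so the m1..m10 example in its comment can be verified:

```java
// With keepLastN = 3 and m6..m9 locked, the deletable files are m4, m5 and
// m10, and the neighbours needed to compute active segments are {m3, m6, m9}.
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

final class StaleMetadataSelection {
    static Set<String> filesToFilterActiveSegments(int keepLastN, List<String> sorted, Set<String> locked) {
        Set<String> result = new LinkedHashSet<>();
        for (int idx = keepLastN; idx < sorted.size(); idx++) {
            if (locked.contains(sorted.get(idx)) == false) {
                String prev = idx - 1 >= 0 ? sorted.get(idx - 1) : null;
                String next = idx + 1 < sorted.size() ? sorted.get(idx + 1) : null;
                // A locked (or first non-deletable) neighbour bounds the set of
                // segments that may still be referenced by a deletable md file.
                if (prev != null && (locked.contains(prev) || idx == keepLastN)) {
                    result.add(prev);
                }
                if (next != null && locked.contains(next)) {
                    result.add(next);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<String> sorted = List.of("m1", "m2", "m3", "m4", "m5", "m6", "m7", "m8", "m9", "m10");
        Set<String> locked = Set.of("m6", "m7", "m8", "m9");
        System.out.println(filesToFilterActiveSegments(3, sorted, locked)); // [m3, m6, m9]
    }
}
```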
@@ -136,25 +138,45 @@ protected IndexInput fetchBlock(int blockId) throws IOException { final long blockStart = getBlockStart(blockId); final long blockEnd = blockStart + getActualBlockSize(blockId); - // If the snapshot file is chunked, we must account for this by - // choosing the appropriate file part and updating the position - // accordingly. - final int part = (int) (blockStart / partSize); - final long partStart = part * partSize; - - final long position = blockStart - partStart; - final long length = blockEnd - blockStart; - + // Block may be present on multiple chunks of a file, so we need + // to fetch each chunk/blob part separately to fetch an entire block. BlobFetchRequest blobFetchRequest = BlobFetchRequest.builder() - .position(position) - .length(length) - .blobName(fileInfo.partName(part)) + .blobParts(getBlobParts(blockStart, blockEnd)) .directory(directory) .fileName(blockFileName) .build(); return transferManager.fetchBlob(blobFetchRequest); } + /** + * Returns list of blob parts/chunks in a file for a given block. + */ + protected List<BlobFetchRequest.BlobPart> getBlobParts(long blockStart, long blockEnd) { + // If the snapshot file is chunked, we must account for this by + // choosing the appropriate file part and updating the position + // accordingly. + int partNum = (int) (blockStart / partSize); + long pos = blockStart; + long diff = (blockEnd - blockStart); + + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + while (diff > 0) { + long partStart = pos % partSize; + long partEnd; + if ((partStart + diff) > partSize) { + partEnd = partSize; + } else { + partEnd = (partStart + diff); + } + long fetchBytes = partEnd - partStart; + blobParts.add(new BlobFetchRequest.BlobPart(fileInfo.partName(partNum), partStart, fetchBytes)); + partNum++; + pos = pos + fetchBytes; + diff = (blockEnd - pos); + } + return blobParts; + } + @Override public OnDemandBlockSnapshotIndexInput clone() { OnDemandBlockSnapshotIndexInput clone = buildSlice("clone", 0L, this.length); diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index fb89e651e7616..0261ab24dfa7a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -11,16 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.settings.Settings; +import org.opensearch.common.inject.Provider; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -30,79 +27,90 @@ import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; /** - * IndexEventListener to clean up file cache when the index is deleted. The cached entries will be eligible + * IndexStoreListener to clean up file cache when the index is deleted. 
The cached entries will be eligible * for eviction when the shard is deleted, but this listener deterministically removes entries from memory and * from disk at the time of shard deletion as opposed to waiting for the cache to need to perform eviction. * * @opensearch.internal */ -public class FileCacheCleaner implements IndexEventListener { - private static final Logger log = LogManager.getLogger(FileCacheCleaner.class); +public class FileCacheCleaner implements NodeEnvironment.IndexStoreListener { + private static final Logger logger = LogManager.getLogger(FileCacheCleaner.class); - private final NodeEnvironment nodeEnvironment; - private final FileCache fileCache; + private final Provider<FileCache> fileCacheProvider; - public FileCacheCleaner(NodeEnvironment nodeEnvironment, FileCache fileCache) { - this.nodeEnvironment = nodeEnvironment; - this.fileCache = fileCache; + public FileCacheCleaner(Provider<FileCache> fileCacheProvider) { + this.fileCacheProvider = fileCacheProvider; } /** - * before shard deleted and after shard closed, cleans up the corresponding index file path entries from FC. - * @param shardId The shard id - * @param settings the shards index settings + * before shard path deleted, cleans up the corresponding index file path entries from FC and delete the corresponding shard file + * cache path. + * + * @param shardId the shard id + * @param indexSettings the index settings + * @param nodeEnvironment the node environment */ @Override - public void beforeIndexShardDeleted(ShardId shardId, Settings settings) { + public void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { + final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); + cleanupShardFileCache(shardPath); + deleteShardFileCacheDirectory(shardPath); + } + } + + /** + * Cleans up the corresponding index file path entries from FileCache + * + * @param shardPath the shard path + */ + private void cleanupShardFileCache(ShardPath shardPath) { try { - if (isRemoteSnapshot(settings)) { - final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); - final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); - try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { - for (Path subPath : ds) { - fileCache.remove(subPath.toRealPath()); - } + final FileCache fc = fileCacheProvider.get(); + assert fc != null; + final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); + try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { + for (Path subPath : ds) { + fc.remove(subPath.toRealPath()); } } } catch (IOException ioe) { - log.error(() -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardId), ioe); + logger.error( + () -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardPath.getShardId()), + ioe + ); } } - @Override - public void afterIndexShardDeleted(ShardId shardId, Settings settings) { - if (isRemoteSnapshot(settings)) { - final Path path = ShardPath.loadFileCachePath(nodeEnvironment, shardId).getDataPath(); - try { - if (Files.exists(path)) { - IOUtils.rm(path); - } - } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardId), e); + private void deleteShardFileCacheDirectory(ShardPath shardPath) { + final Path path = shardPath.getDataPath(); + try { 
+ if (Files.exists(path)) { + IOUtils.rm(path); } + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardPath.getShardId()), e); } } + /** + * before index path deleted, delete the corresponding index file cache path. + * + * @param index the index + * @param indexSettings the index settings + * @param nodeEnvironment the node environment + */ @Override - public void afterIndexRemoved( - Index index, - IndexSettings indexSettings, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason - ) { - if (isRemoteSnapshot(indexSettings.getSettings()) - && reason == IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED) { + public void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { final Path indexCachePath = nodeEnvironment.fileCacheNodePath().fileCachePath.resolve(index.getUUID()); if (Files.exists(indexCachePath)) { try { IOUtils.rm(indexCachePath); } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); } } } } - - private static boolean isRemoteSnapshot(Settings settings) { - return IndexModule.Type.REMOTE_SNAPSHOT.match(settings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())); - } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java index d0508e9c6f4c7..f7e6545b5010e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java @@ -12,6 +12,7 @@ import org.apache.lucene.store.FSDirectory; import java.nio.file.Path; +import java.util.List; /** * The specification to fetch specific block from blob store @@ -20,37 +21,22 @@ */ public class BlobFetchRequest { - private final long position; - - private final long length; - - private final String blobName; - private final Path filePath; private final Directory directory; private final String fileName; + private final List<BlobPart> blobParts; + + private final long blobLength; + private BlobFetchRequest(Builder builder) { - this.position = builder.position; - this.length = builder.length; - this.blobName = builder.blobName; this.fileName = builder.fileName; this.filePath = builder.directory.getDirectory().resolve(fileName); this.directory = builder.directory; - } - - public long getPosition() { - return position; - } - - public long getLength() { - return length; - } - - public String getBlobName() { - return blobName; + this.blobParts = builder.blobParts; + this.blobLength = builder.blobParts.stream().mapToLong(o -> o.getLength()).sum(); } public Path getFilePath() { @@ -65,6 +51,14 @@ public String getFileName() { return fileName; } + public List<BlobPart> blobParts() { + return blobParts; + } + + public long getBlobLength() { + return blobLength; + } + public static Builder builder() { return new Builder(); } @@ -72,12 +66,8 @@ public static Builder builder() { @Override public String toString() { return "BlobFetchRequest{" - + "position=" - + position - + ", length=" - + length - + ", blobName='" - + blobName + + "blobParts=" + + blobParts + '\'' + ", filePath=" + filePath @@ -90,35 +80,45 @@ public String toString() { } /** - * 
/** - * Builder for BlobFetchRequest + * BlobPart represents a single chunk of a file */ - public static final class Builder { + public static class BlobPart { + private String blobName; private long position; private long length; - private String blobName; - private FSDirectory directory; - private String fileName; - - private Builder() {} - public Builder position(long position) { - this.position = position; - return this; - } - - public Builder length(long length) { + public BlobPart(String blobName, long position, long length) { + this.blobName = blobName; if (length <= 0) { - throw new IllegalArgumentException("Length for blob fetch request needs to be non-negative"); + throw new IllegalArgumentException("Length for blob part fetch request needs to be positive"); } this.length = length; - return this; + this.position = position; } - public Builder blobName(String blobName) { - this.blobName = blobName; - return this; + public String getBlobName() { + return blobName; + } + + public long getPosition() { + return position; } + public long getLength() { + return length; + } + } + + /** + * Builder for BlobFetchRequest + */ + public static final class Builder { + private List<BlobPart> blobParts; + private FSDirectory directory; + private String fileName; + + private Builder() {} + public Builder directory(FSDirectory directory) { this.directory = directory; return this; } @@ -129,6 +129,11 @@ public Builder fileName(String fileName) { return this; } + public Builder blobParts(List<BlobPart> blobParts) { + this.blobParts = blobParts; + return this; + } + public BlobFetchRequest build() { return new BlobFetchRequest(this); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index c9469283ee921..98cad7bfadb09 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -48,11 +48,12 @@ public TransferManager(final BlobContainer blobContainer, final FileCache fileCa } /** - * Given a blobFetchRequest, return it's corresponding IndexInput. + * Given a blobFetchRequest, return its corresponding IndexInput.
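With the change above, a BlobFetchRequest describes one logical file as an ordered list of BlobPart ranges, and the total blob length is the sum of the part lengths. A hedged sketch of that invariant using a trimmed-down part type (the names and sizes below are illustrative, not taken from the PR):

```java
import java.util.List;

// Trimmed-down stand-in for the BlobPart added above: an ordered list of
// (blobName, position, length) ranges describes one logical file, and the
// file's length is the sum of the part lengths.
record BlobPart(String blobName, long position, long length) {
    BlobPart {
        if (length <= 0) {
            throw new IllegalArgumentException("Length for blob part fetch request needs to be positive");
        }
    }
}

class BlobPartsDemo {
    public static void main(String[] args) {
        // A 30-byte logical file stored as three 10-byte ranges across two blobs (values made up).
        List<BlobPart> parts = List.of(
            new BlobPart("blob_0", 0, 10),
            new BlobPart("blob_0", 10, 10),
            new BlobPart("blob_1", 0, 10)
        );
        long blobLength = parts.stream().mapToLong(BlobPart::length).sum();
        System.out.println("logical file length = " + blobLength); // prints 30
    }
}
```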
* @param blobFetchRequest to fetch * @return future of IndexInput augmented with internal caching maintenance tasks */ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOException { + final Path key = blobFetchRequest.getFilePath(); final CachedIndexInput cacheEntry = fileCache.compute(key, (path, cachedIndexInput) -> { @@ -75,6 +76,7 @@ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOExceptio } } + @SuppressWarnings("removal") private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobContainer blobContainer, BlobFetchRequest request) { // We need to do a privileged action here in order to fetch from remote // and write to the local file cache in case this is invoked as a side @@ -84,15 +86,20 @@ private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobCo try { if (Files.exists(request.getFilePath()) == false) { try ( - InputStream snapshotFileInputStream = blobContainer.readBlob( - request.getBlobName(), - request.getPosition(), - request.getLength() - ); OutputStream fileOutputStream = Files.newOutputStream(request.getFilePath()); OutputStream localFileOutputStream = new BufferedOutputStream(fileOutputStream) ) { - snapshotFileInputStream.transferTo(localFileOutputStream); + for (BlobFetchRequest.BlobPart blobPart : request.blobParts()) { + try ( + InputStream snapshotFileInputStream = blobContainer.readBlob( + blobPart.getBlobName(), + blobPart.getPosition(), + blobPart.getLength() + ); + ) { + snapshotFileInputStream.transferTo(localFileOutputStream); + } + } } } final IndexInput luceneIndexInput = request.getDirectory().openInput(request.getFileName(), IOContext.READ); @@ -152,7 +159,7 @@ public IndexInput getIndexInput() throws IOException { @Override public long length() { - return request.getLength(); + return request.getBlobLength(); } @Override diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java index 9c2304f809f46..f3c17cdaa0054 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -8,6 +8,8 @@ package org.opensearch.index.translog.transfer; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.logging.Loggers; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -33,11 +35,13 @@ public class FileTransferTracker implements FileTransferListener { private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private Map<String, Long> bytesForTlogCkpFileToUpload; private long fileTransferStartTime = -1; + private final Logger logger; public FileTransferTracker(ShardId shardId, RemoteTranslogTransferTracker remoteTranslogTransferTracker) { this.shardId = shardId; this.fileTransferTracker = new ConcurrentHashMap<>(); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + this.logger = Loggers.getLogger(getClass(), shardId); } void recordFileTransferStartTime(long uploadStartTime) { @@ -64,9 +68,14 @@ long getTotalBytesToUpload() { @Override public void onSuccess(TransferFileSnapshot fileSnapshot) { - long durationInMillis = (System.nanoTime() - fileTransferStartTime) / 1_000_000L; - 
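The download path above opens the local file once and streams each remote range into it sequentially, so a file split across several blob parts reassembles into a single cache file. A compact sketch of that loop, assuming a readRange stand-in for BlobContainer.readBlob(name, position, length) and a simplified part type:

```java
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Sketch of the download loop above: open the local file once, then stream each
// remote range into it in order. readRange stands in for
// BlobContainer.readBlob(name, position, length); Part mirrors BlobPart.
class PartConcatenator {
    record Part(String blobName, long position, long length) {}

    interface RangeReader {
        InputStream readRange(String blobName, long position, long length) throws IOException;
    }

    static void download(RangeReader reader, List<Part> parts, Path localFile) throws IOException {
        try (OutputStream out = new BufferedOutputStream(Files.newOutputStream(localFile))) {
            for (Part part : parts) { // parts must arrive in logical file order
                try (InputStream in = reader.readRange(part.blobName(), part.position(), part.length())) {
                    in.transferTo(out); // append this range after the previous one
                }
            }
        }
    }
}
```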
remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); - remoteTranslogTransferTracker.addUploadBytesSucceeded(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); + try { + long durationInMillis = (System.nanoTime() - fileTransferStartTime) / 1_000_000L; + remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); + remoteTranslogTransferTracker.addUploadBytesSucceeded(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); + } catch (Exception ex) { + logger.error("Failure to update translog upload success stats", ex); + } + add(fileSnapshot.getName(), TransferState.SUCCESS); } diff --git a/server/src/main/java/org/opensearch/indices/IRCKeyWriteableSerializer.java b/server/src/main/java/org/opensearch/indices/IRCKeyWriteableSerializer.java new file mode 100644 index 0000000000000..781f5765d8da8 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/IRCKeyWriteableSerializer.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; + +import java.io.IOException; +import java.util.Arrays; + +/** + * This class serializes the IndicesRequestCache.Key using its writeTo method. + */ +public class IRCKeyWriteableSerializer implements Serializer<IndicesRequestCache.Key, byte[]> { + + public IRCKeyWriteableSerializer() {} + + @Override + public byte[] serialize(IndicesRequestCache.Key object) { + if (object == null) { + return null; + } + try { + BytesStreamOutput os = new BytesStreamOutput(); + object.writeTo(os); + return BytesReference.toBytes(os.bytes()); + } catch (IOException e) { + throw new OpenSearchException("Unable to serialize IndicesRequestCache.Key", e); + } + } + + @Override + public IndicesRequestCache.Key deserialize(byte[] bytes) { + if (bytes == null) { + return null; + } + try { + BytesStreamInput is = new BytesStreamInput(bytes, 0, bytes.length); + return new IndicesRequestCache.Key(is); + } catch (IOException e) { + throw new OpenSearchException("Unable to deserialize byte[] to IndicesRequestCache.Key", e); + } + } + + @Override + public boolean equals(IndicesRequestCache.Key object, byte[] bytes) { + // Deserialization is much slower than serialization for keys of order 1 KB, + // while time to serialize is fairly constant (per byte) + if (bytes.length < 5000) { + return Arrays.equals(serialize(object), bytes); + } else { + return object.equals(deserialize(bytes)); + } + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index eea5dbbf57f6c..b86e98f4ebcbc 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -81,6 +81,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.store.IndicesStore; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; import org.opensearch.plugins.MapperPlugin; 
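The IRCKeyWriteableSerializer above picks its equals strategy by payload size: for small keys it re-serializes the object and compares bytes, while for large keys it deserializes once and compares objects. A toy serializer over String keys showing the same round-trip contract and threshold trick (the 5000-byte cutoff mirrors the one above; everything else here is illustrative):

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Contract a Serializer<K, byte[]> must satisfy: deserialize(serialize(k))
// equals k, and equals(k, serialize(k)) holds no matter which comparison
// branch (byte-wise vs. object-wise) is taken.
class SerializerRoundTripCheck {
    interface Serializer<T, U> {
        U serialize(T object);
        T deserialize(U bytes);
        boolean equals(T object, U bytes);
    }

    // Toy serializer over String keys, mirroring the size-threshold trick above.
    static final Serializer<String, byte[]> TOY = new Serializer<>() {
        public byte[] serialize(String s) {
            return s.getBytes(StandardCharsets.UTF_8);
        }

        public String deserialize(byte[] b) {
            return new String(b, StandardCharsets.UTF_8);
        }

        public boolean equals(String s, byte[] b) {
            // Small payloads: serializing again and comparing bytes is cheaper
            // than deserializing; large payloads: deserialize once and compare.
            return b.length < 5000 ? Arrays.equals(serialize(s), b) : s.equals(deserialize(b));
        }
    };

    public static void main(String[] args) {
        String key = "shard[0] reader[abc] request-hash";
        byte[] bytes = TOY.serialize(key);
        System.out.println(TOY.deserialize(bytes).equals(key)); // true
        System.out.println(TOY.equals(key, bytes));             // true
    }
}
```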
import java.util.ArrayList; @@ -281,6 +282,7 @@ protected void configure() { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetadata.class).asEagerSingleton(); + bind(TransportNodesListShardStoreMetadataBatch.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton(); bind(PrimaryReplicaSyncer.class).asEagerSingleton(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 6d5c23274dbd6..34c8d6cf5e840 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -38,16 +38,23 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedSupplier; -import org.opensearch.common.cache.Cache; -import org.opensearch.common.cache.CacheBuilder; -import org.opensearch.common.cache.CacheLoader; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.serializer.BytesReferenceSerializer; +import org.opensearch.common.cache.service.CacheService; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.RatioValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.common.bytes.BytesReference; @@ -57,18 +64,25 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; +import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import java.util.function.ToLongBiFunction; + +import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING; /** * The indices request cache allows to cache a shard level request stage responses, helping with improving @@ -109,46 +123,93 @@ public final class IndicesRequestCache implements RemovalListener<IndicesRequest new TimeValue(0), Property.NodeScope ); + public static final Setting<TimeValue> INDICES_REQUEST_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting( + "indices.requests.cache.cleanup.interval", + INDICES_CACHE_CLEAN_INTERVAL_SETTING, + Property.NodeScope + ); + public static final 
Setting<String> INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING = new Setting<>( + "indices.requests.cache.cleanup.staleness_threshold", + "0%", + IndicesRequestCache::validateStalenessSetting, + Property.NodeScope + ); private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); - private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; private final TimeValue expire; - private final Cache<Key, BytesReference> cache; + private final ICache<Key, BytesReference> cache; private final Function<ShardId, Optional<CacheEntity>> cacheEntityLookup; - - IndicesRequestCache(Settings settings, Function<ShardId, Optional<CacheEntity>> cacheEntityFunction) { + // pkg-private for testing + final IndicesRequestCacheCleanupManager cacheCleanupManager; + + IndicesRequestCache( + Settings settings, + Function<ShardId, Optional<CacheEntity>> cacheEntityFunction, + CacheService cacheService, + ThreadPool threadPool + ) { this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); - CacheBuilder<Key, BytesReference> cacheBuilder = CacheBuilder.<Key, BytesReference>builder() - .setMaximumWeight(sizeInBytes) - .weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()) - .removalListener(this); - if (expire != null) { - cacheBuilder.setExpireAfterAccess(expire); - } - cache = cacheBuilder.build(); + ToLongBiFunction<Key, BytesReference> weigher = (k, v) -> k.ramBytesUsed() + v.ramBytesUsed(); + this.cacheCleanupManager = new IndicesRequestCacheCleanupManager( + threadPool, + INDICES_REQUEST_CACHE_CLEAN_INTERVAL_SETTING.get(settings), + getStalenessThreshold(settings) + ); this.cacheEntityLookup = cacheEntityFunction; + this.cache = cacheService.createCache( + new CacheConfig.Builder<Key, BytesReference>().setSettings(settings) + .setWeigher(weigher) + .setValueType(BytesReference.class) + .setKeyType(Key.class) + .setRemovalListener(this) + .setMaxSizeInBytes(sizeInBytes) // for backward compatibility + .setExpireAfterAccess(expire) // for backward compatibility + .setCachedResultParser((bytesReference) -> { + try { + return CachedQueryResult.getPolicyValues(bytesReference); + } catch (IOException e) { + // Set took time to -1, which will always be rejected by the policy. 
+ return new CachedQueryResult.PolicyValues(-1); + } + }) + .setKeySerializer(new IRCKeyWriteableSerializer()) + .setValueSerializer(new BytesReferenceSerializer()) + .build(), + CacheType.INDICES_REQUEST_CACHE + ); } @Override - public void close() { + public void close() throws IOException { cache.invalidateAll(); + cache.close(); + cacheCleanupManager.close(); + } + + private double getStalenessThreshold(Settings settings) { + String threshold = INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.get(settings); + return RatioValue.parseRatioValue(threshold).getAsRatio(); } void clear(CacheEntity entity) { - keysToClean.add(new CleanupKey(entity, null)); - cleanCache(); + cacheCleanupManager.enqueueCleanupKey(new CleanupKey(entity, null)); + cacheCleanupManager.forceCleanCache(); } @Override public void onRemoval(RemovalNotification<Key, BytesReference> notification) { // In case this event happens for an old shard, we can safely ignore this as we don't keep track for old // shards as part of request cache. - cacheEntityLookup.apply(notification.getKey().shardId).ifPresent(entity -> entity.onRemoval(notification)); + Key key = notification.getKey(); + cacheEntityLookup.apply(key.shardId).ifPresent(entity -> entity.onRemoval(notification)); + cacheCleanupManager.updateCleanupKeyToCountMapOnCacheEviction( + new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) + ); } BytesReference getOrCompute( @@ -169,7 +230,7 @@ BytesReference getOrCompute( BytesReference value = cache.computeIfAbsent(key, cacheLoader); if (cacheLoader.isLoaded()) { cacheEntity.onMiss(); - // see if its the first time we see this reader, and make sure to register a cleanup key + // see if it's the first time we see this reader, and make sure to register a cleanup key CleanupKey cleanupKey = new CleanupKey(cacheEntity, readerCacheKeyId); if (!registeredClosedListeners.containsKey(cleanupKey)) { Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); @@ -177,6 +238,7 @@ BytesReference getOrCompute( OpenSearchDirectoryReader.addReaderCloseListener(reader, cleanupKey); } } + cacheCleanupManager.updateCleanupKeyToCountMapOnCacheInsertion(cleanupKey); } else { cacheEntity.onHit(); } @@ -204,7 +266,7 @@ void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReade * * @opensearch.internal */ - private static class Loader implements CacheLoader<Key, BytesReference> { + private static class Loader implements LoadAwareCacheLoader<Key, BytesReference> { private final CacheEntity entity; private final CheckedSupplier<BytesReference, IOException> loader; @@ -338,9 +400,11 @@ private CleanupKey(CacheEntity entity, String readerCacheKeyId) { @Override public void onClose(IndexReader.CacheKey cacheKey) { - Boolean remove = registeredClosedListeners.remove(this); - if (remove != null) { - keysToClean.add(this); + // Remove the current CleanupKey from the registeredClosedListeners map + // If the key was present, enqueue it for cleanup + Boolean wasRegistered = registeredClosedListeners.remove(this); + if (wasRegistered != null) { + cacheCleanupManager.enqueueCleanupKey(this); } } @@ -364,50 +428,312 @@ public int hashCode() { } } - /** - * Logic to clean up in-memory cache. 
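The new indices.requests.cache.cleanup.staleness_threshold setting above is validated through RatioValue.parseRatioValue, so it accepts a percentage or a ratio, and "0%" disables stale-key tracking entirely. A simplified stand-in for that parse step (parseRatio below is not the real RatioValue implementation, whose accepted formats may differ slightly):

```java
// Simplified stand-in for RatioValue.parseRatioValue: accepts "25%" or "0.25"
// and normalizes to a ratio in [0, 1]; anything else fails fast, which is what
// the setting validator above relies on at settings-apply time.
class StalenessThresholdDemo {
    static double parseRatio(String value) {
        String v = value.trim();
        double ratio = v.endsWith("%")
            ? Double.parseDouble(v.substring(0, v.length() - 1)) / 100.0
            : Double.parseDouble(v);
        if (ratio < 0.0 || ratio > 1.0) {
            throw new IllegalArgumentException("staleness threshold must be within [0%, 100%]: " + value);
        }
        return ratio;
    }

    public static void main(String[] args) {
        System.out.println(parseRatio("0%"));  // 0.0  -> stale-key tracking disabled
        System.out.println(parseRatio("25%")); // 0.25 -> clean only when >= 25% of keys are stale
        System.out.println(parseRatio("0.5")); // 0.5
    }
}
```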
- */ - synchronized void cleanCache() { - final Set<CleanupKey> currentKeysToClean = new HashSet<>(); - final Set<Object> currentFullClean = new HashSet<>(); - currentKeysToClean.clear(); - currentFullClean.clear(); - for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext();) { - CleanupKey cleanupKey = iterator.next(); - iterator.remove(); - if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { - // null indicates full cleanup, as does a closed shard - currentFullClean.add(((IndexShard) cleanupKey.entity.getCacheIdentity()).shardId()); - } else { - currentKeysToClean.add(cleanupKey); + /* + * The IndicesRequestCacheCleanupManager manages the cleanup of stale keys in IndicesRequestCache. + * + * It also keeps track of the number of stale keys in the cache (staleKeysCount) and a staleness threshold, + * which is used to determine when the cache should be cleaned. + * + * If Staleness threshold is 0, we do not keep track of stale keys in the cache + * */ + class IndicesRequestCacheCleanupManager implements Closeable { + private final Set<CleanupKey> keysToClean; + private final ConcurrentMap<ShardId, HashMap<String, Integer>> cleanupKeyToCountMap; + private final AtomicInteger staleKeysCount; + private final double stalenessThreshold; + private final IndicesRequestCacheCleaner cacheCleaner; + + IndicesRequestCacheCleanupManager(ThreadPool threadpool, TimeValue cleanInterval, double stalenessThreshold) { + this.stalenessThreshold = stalenessThreshold; + this.keysToClean = ConcurrentCollections.newConcurrentSet(); + this.cleanupKeyToCountMap = ConcurrentCollections.newConcurrentMap(); + this.staleKeysCount = new AtomicInteger(0); + this.cacheCleaner = new IndicesRequestCacheCleaner(this, threadpool, cleanInterval); + threadpool.schedule(cacheCleaner, cleanInterval, ThreadPool.Names.SAME); + } + + /** + * Enqueue cleanup key. + * + * @param cleanupKey the cleanup key + */ + void enqueueCleanupKey(CleanupKey cleanupKey) { + keysToClean.add(cleanupKey); + incrementStaleKeysCount(cleanupKey); + } + + /** + * Updates the cleanupKeyToCountMap with the given CleanupKey. + * If the ShardId associated with the CleanupKey does not exist in the map, a new entry is created. + * The method increments the count of the CleanupKey in the map. + * <p> + * Why use ShardID as the key ? + * CacheEntity mainly contains IndexShard, both of these classes do not override equals() and hashCode() methods. + * ShardID class properly overrides equals() and hashCode() methods. + * Therefore, to avoid modifying CacheEntity and IndexShard classes to override these methods, we use ShardID as the key. + * + * @param cleanupKey the CleanupKey to be updated in the map + */ + private void updateCleanupKeyToCountMapOnCacheInsertion(CleanupKey cleanupKey) { + if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { + return; + } + IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + if (indexShard == null) { + logger.warn("IndexShard is null for CleanupKey: {} while cleaning Indices Request Cache", cleanupKey.readerCacheKeyId); + return; } + ShardId shardId = indexShard.shardId(); + + // If the key doesn't exist, it's added with a value of 1. + // If the key exists, its value is incremented by 1. 
+ cleanupKeyToCountMap.computeIfAbsent(shardId, k -> new HashMap<>()).merge(cleanupKey.readerCacheKeyId, 1, Integer::sum); + } + + private void updateCleanupKeyToCountMapOnCacheEviction(CleanupKey cleanupKey) { + if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { + return; + } + IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + if (indexShard == null) { + logger.warn("IndexShard is null for CleanupKey: {} while cleaning Indices Request Cache", cleanupKey.readerCacheKeyId); + return; + } + ShardId shardId = indexShard.shardId(); + + cleanupKeyToCountMap.computeIfPresent(shardId, (shard, keyCountMap) -> { + keyCountMap.computeIfPresent(cleanupKey.readerCacheKeyId, (key, currentValue) -> { + // decrement the stale key count + staleKeysCount.decrementAndGet(); + int newValue = currentValue - 1; + // Remove the key if the new value is zero by returning null; otherwise, update with the new value. + return newValue == 0 ? null : newValue; + }); + return keyCountMap; + }); + } + + /** + * Updates the count of stale keys in the cache. + * This method is called when a CleanupKey is added to the keysToClean set. + * + * It increments the staleKeysCount by the count of the CleanupKey in the cleanupKeyToCountMap. + * If the CleanupKey's readerCacheKeyId is null or the CleanupKey's entity is not open, it increments the staleKeysCount + * by the total count of keys associated with the CleanupKey's ShardId in the cleanupKeyToCountMap and removes the ShardId from the map. + * + * @param cleanupKey the CleanupKey that has been marked for cleanup + */ + private void incrementStaleKeysCount(CleanupKey cleanupKey) { + if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { + return; + } + IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); + if (indexShard == null) { + logger.warn("IndexShard is null for CleanupKey: {}", cleanupKey.readerCacheKeyId); + return; + } + ShardId shardId = indexShard.shardId(); + + // Using computeIfPresent to atomically operate on the countMap for a given shardId + cleanupKeyToCountMap.computeIfPresent(shardId, (key, countMap) -> { + if (cleanupKey.readerCacheKeyId == null) { + // Aggregate and add to staleKeysCount atomically if readerCacheKeyId is null + int totalSum = countMap.values().stream().mapToInt(Integer::intValue).sum(); + staleKeysCount.addAndGet(totalSum); + // Return null to automatically remove the mapping for shardId + return null; + } else { + // Update staleKeysCount based on specific readerCacheKeyId, then remove it from the countMap + countMap.computeIfPresent(cleanupKey.readerCacheKeyId, (k, v) -> { + staleKeysCount.addAndGet(v); + // Return null to remove the key after updating staleKeysCount + return null; + }); + + // Check if countMap is empty after removal to decide if we need to remove the shardId entry + if (countMap.isEmpty()) { + return null; // Returning null removes the entry for shardId + } + } + return countMap; // Return the modified countMap to keep the mapping + }); + } + + // package private for testing + AtomicInteger getStaleKeysCount() { + return staleKeysCount; + } + + /** + * Clean cache based on stalenessThreshold + */ + void cleanCache() { + cleanCache(stalenessThreshold); + } + + /** + * Force Clean cache without checking stalenessThreshold + */ + private void forceCleanCache() { + cleanCache(0); } - if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { + + /** + * Cleans the cache based on the provided staleness threshold. 
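The bookkeeping above keeps, per shard, a count of live cache entries for each readerCacheKeyId, so that closing a reader can move an exact count onto staleKeysCount instead of rescanning the cache. The same increment/decrement pattern in isolation, with String keys standing in for ShardId and reader cache ids:

```java
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the cleanupKeyToCountMap bookkeeping above: per shard, count live
// cache entries per reader key; increment on insertion, decrement on eviction,
// and drop zero counts so the inner map never accumulates dead reader keys.
class StaleKeyAccounting {
    final ConcurrentMap<String, HashMap<String, Integer>> countsByShard = new ConcurrentHashMap<>();
    final AtomicInteger staleKeysCount = new AtomicInteger();

    void onInsert(String shardId, String readerKey) {
        countsByShard.computeIfAbsent(shardId, k -> new HashMap<>()).merge(readerKey, 1, Integer::sum);
    }

    void onEvict(String shardId, String readerKey) {
        countsByShard.computeIfPresent(shardId, (shard, counts) -> {
            counts.computeIfPresent(readerKey, (k, v) -> {
                staleKeysCount.decrementAndGet(); // an evicted entry no longer needs cleanup
                return v == 1 ? null : v - 1;     // returning null removes the key
            });
            return counts;
        });
    }

    void onReaderClosed(String shardId, String readerKey) {
        // All entries for this reader are now stale; move their count onto staleKeysCount.
        countsByShard.computeIfPresent(shardId, (shard, counts) -> {
            Integer n = counts.remove(readerKey);
            if (n != null) {
                staleKeysCount.addAndGet(n);
            }
            return counts.isEmpty() ? null : counts; // returning null drops the shard entry
        });
    }
}
```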
+ * <p>If the percentage of stale keys in the cache is less than this threshold, the cache cleanup process is skipped. + * @param stalenessThreshold The staleness threshold as a double. + */ + private synchronized void cleanCache(double stalenessThreshold) { + if (logger.isDebugEnabled()) { + logger.debug("Cleaning Indices Request Cache with threshold : " + stalenessThreshold); + } + if (canSkipCacheCleanup(stalenessThreshold)) { + return; + } + // Contains CleanupKey objects with open shard but invalidated readerCacheKeyId. + final Set<CleanupKey> cleanupKeysFromOutdatedReaders = new HashSet<>(); + // Contains CleanupKey objects of a closed shard. + final Set<Object> cleanupKeysFromClosedShards = new HashSet<>(); + + for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext();) { + CleanupKey cleanupKey = iterator.next(); + iterator.remove(); + if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { + // null indicates full cleanup, as does a closed shard + cleanupKeysFromClosedShards.add(((IndexShard) cleanupKey.entity.getCacheIdentity()).shardId()); + } else { + cleanupKeysFromOutdatedReaders.add(cleanupKey); + } + } + + if (cleanupKeysFromOutdatedReaders.isEmpty() && cleanupKeysFromClosedShards.isEmpty()) { + return; + } + for (Iterator<Key> iterator = cache.keys().iterator(); iterator.hasNext();) { Key key = iterator.next(); - if (currentFullClean.contains(key.shardId)) { + if (cleanupKeysFromClosedShards.contains(key.shardId)) { iterator.remove(); } else { - // If the flow comes here, then we should have a open shard available on node. - if (currentKeysToClean.contains( - new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) - )) { + CleanupKey cleanupKey = new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId); + if (cleanupKeysFromOutdatedReaders.contains(cleanupKey)) { iterator.remove(); } } } + cache.refresh(); + } + + /** + * Determines whether the cache cleanup process can be skipped based on the staleness threshold. + * + * <p>If the percentage of stale keys is less than the provided staleness threshold, returns true, + * indicating that the cache cleanup process can be skipped. + * + * @param cleanThresholdPercent The staleness threshold as a percentage. + * @return true if the cache cleanup process can be skipped, false otherwise. + */ + private synchronized boolean canSkipCacheCleanup(double cleanThresholdPercent) { + if (cleanThresholdPercent == 0.0) { + return false; + } + double staleKeysInCachePercentage = staleKeysInCachePercentage(); + if (staleKeysInCachePercentage < cleanThresholdPercent) { + if (logger.isDebugEnabled()) { + logger.debug( + "Skipping Indices Request cache cleanup since the percentage of stale keys : " + + staleKeysInCachePercentage + + " is less than the threshold : " + + stalenessThreshold + ); + } + return true; + } + return false; + } + + /** + * Calculates and returns the percentage of stale keys in the cache. + * + * @return The percentage of stale keys in the cache as a double. Returns 0 if there are no keys in the cache or no stale keys.
+ */ + private synchronized double staleKeysInCachePercentage() { + long totalKeysInCache = count(); + if (totalKeysInCache == 0 || staleKeysCount.get() == 0) { + return 0; + } + return ((double) staleKeysCount.get() / totalKeysInCache); + } + + @Override + public void close() { + this.cacheCleaner.close(); + } + + private final class IndicesRequestCacheCleaner implements Runnable, Releasable { + + private final IndicesRequestCacheCleanupManager cacheCleanupManager; + private final ThreadPool threadPool; + private final TimeValue interval; + + IndicesRequestCacheCleaner(IndicesRequestCacheCleanupManager cacheCleanupManager, ThreadPool threadPool, TimeValue interval) { + this.cacheCleanupManager = cacheCleanupManager; + this.threadPool = threadPool; + this.interval = interval; + } + + private final AtomicBoolean closed = new AtomicBoolean(false); + + @Override + public void run() { + try { + this.cacheCleanupManager.cleanCache(); + } catch (Exception e) { + logger.warn("Exception during periodic indices request cache cleanup:", e); + } + // Reschedule itself to run again if not closed + if (closed.get() == false) { + threadPool.scheduleUnlessShuttingDown(interval, ThreadPool.Names.SAME, this); + } + } + + @Override + public void close() { + closed.compareAndSet(false, true); + } } - cache.refresh(); } /** * Returns the current size of the cache */ - int count() { + long count() { return cache.count(); } int numRegisteredCloseListeners() { // for testing return registeredClosedListeners.size(); } + + /** + * Validates the staleness setting for the cache cleanup threshold. + * + * <p>This method checks if the provided staleness threshold is a valid percentage or a valid double value. + * If the staleness threshold is not valid, it throws an OpenSearchParseException. + * + * @param staleThreshold The staleness threshold to validate. + * @return The validated staleness threshold. + * @throws OpenSearchParseException If the staleness threshold is not a valid percentage or double value. 
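The IndicesRequestCacheCleaner above is a one-shot task that re-arms itself after each run and stops re-arming once closed. A self-contained version of that pattern, using ScheduledExecutorService as a stand-in for the OpenSearch ThreadPool scheduling API:

```java
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Minimal self-rescheduling cleaner, mirroring IndicesRequestCacheCleaner:
// run once, then re-arm only if not closed. ScheduledExecutorService stands in
// for ThreadPool.scheduleUnlessShuttingDown.
class SelfReschedulingCleaner implements Runnable, AutoCloseable {
    private final ScheduledExecutorService scheduler;
    private final Runnable cleanup;
    private final long intervalMillis;
    private final AtomicBoolean closed = new AtomicBoolean(false);

    SelfReschedulingCleaner(ScheduledExecutorService scheduler, Runnable cleanup, long intervalMillis) {
        this.scheduler = scheduler;
        this.cleanup = cleanup;
        this.intervalMillis = intervalMillis;
        scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS); // arm the first run
    }

    @Override
    public void run() {
        try {
            cleanup.run();
        } catch (Exception e) {
            // swallow and keep the schedule alive, as the real cleaner does
            System.err.println("cleanup failed: " + e);
        }
        if (!closed.get()) {
            scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS); // re-arm
        }
    }

    @Override
    public void close() {
        closed.compareAndSet(false, true); // in-flight run finishes; no re-arm afterwards
    }
}
```

Re-arming after each completed run, rather than using scheduleAtFixedRate, guarantees that runs never overlap even when one cleanup takes longer than the interval.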
+ * + * <p>package private for testing + */ + static String validateStalenessSetting(String staleThreshold) { + try { + RatioValue.parseRatioValue(staleThreshold); + } catch (OpenSearchParseException e) { + throw e; + } + return staleThreshold; + } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index db5b93f073b03..40c10e3a2fe96 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,6 +62,8 @@ import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; @@ -81,9 +83,7 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -135,7 +135,6 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; @@ -362,7 +361,6 @@ public class IndicesService extends AbstractLifecycleComponent private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final FileCacheCleaner fileCacheCleaner; private final SearchRequestStats searchRequestStats; @@ -395,10 +393,10 @@ public IndicesService( Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier<RepositoriesService> repositoriesServiceSupplier, - FileCacheCleaner fileCacheCleaner, SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + CacheService cacheService ) { this.settings = settings; this.threadPool = threadPool; @@ -415,7 +413,7 @@ public IndicesService( return Optional.empty(); } return Optional.of(new IndexShardCacheEntity(indexService.getShard(shardId.id()))); - })); + }), cacheService, threadPool); this.indicesQueryCache = new IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; this.namedWriteableRegistry = namedWriteableRegistry; @@ -444,13 +442,12 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon } }); this.cleanInterval =
INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); - this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); + this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, logger, threadPool, this.cleanInterval); this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; - this.fileCacheCleaner = fileCacheCleaner; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -766,7 +763,6 @@ public void onStoreClosed(ShardId shardId) { }; finalListeners.add(onStoreClose); finalListeners.add(oldShardsStats); - finalListeners.add(fileCacheCleaner); final IndexService indexService = createIndexService( CREATE_INDEX, indexMetadata, @@ -1590,17 +1586,9 @@ private static final class CacheCleaner implements Runnable, Releasable { private final ThreadPool threadPool; private final TimeValue interval; private final AtomicBoolean closed = new AtomicBoolean(false); - private final IndicesRequestCache requestCache; - - CacheCleaner( - IndicesFieldDataCache cache, - IndicesRequestCache requestCache, - Logger logger, - ThreadPool threadPool, - TimeValue interval - ) { + + CacheCleaner(IndicesFieldDataCache cache, Logger logger, ThreadPool threadPool, TimeValue interval) { this.cache = cache; - this.requestCache = requestCache; this.logger = logger; this.threadPool = threadPool; this.interval = interval; @@ -1623,12 +1611,6 @@ public void run() { TimeValue.nsecToMSec(System.nanoTime() - startTimeNS) ); } - - try { - this.requestCache.cleanCache(); - } catch (Exception e) { - logger.warn("Exception during periodic request cache cleanup:", e); - } // Reschedule itself to run again if not closed if (closed.get() == false) { threadPool.scheduleUnlessShuttingDown(interval, ThreadPool.Names.SAME, this); @@ -1702,16 +1684,20 @@ public void loadIntoContext(ShardSearchRequest request, SearchContext context, Q boolean[] loadedFromCache = new boolean[] { true }; BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> { + long beforeQueryPhase = System.nanoTime(); queryPhase.execute(context); - context.queryResult().writeToNoId(out); + // Write relevant info for cache tier policies before the whole QuerySearchResult, so we don't have to read + // the whole QSR into memory when we decide whether to allow it into a particular cache tier based on took time/other info + CachedQueryResult cachedQueryResult = new CachedQueryResult(context.queryResult(), System.nanoTime() - beforeQueryPhase); + cachedQueryResult.writeToNoId(out); loadedFromCache[0] = false; }); if (loadedFromCache[0]) { // restore the cached query result into the context final QuerySearchResult result = context.queryResult(); - StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry); - result.readFromWithId(context.id(), in); + // Load the cached QSR into result, discarding values used only in the cache + CachedQueryResult.loadQSR(bytesReference, result, context.id(), namedWriteableRegistry); result.setSearchShardTarget(context.shardTarget()); } else if (context.queryResult().searchTimedOut()) { // we have to invalidate the cache 
entry if we cached a query result form a request that timed out. diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 5351ae7fe08dd..2b41eb125d808 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -46,6 +46,8 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; +import java.util.concurrent.TimeUnit; + /** * Settings for the recovery mechanism * @@ -176,6 +178,13 @@ public class RecoverySettings { Property.Dynamic ); + public static final Setting<TimeValue> INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( + "indices.recovery.internal_remote_upload_timeout", + new TimeValue(1, TimeUnit.HOURS), + Property.Dynamic, + Property.NodeScope + ); + // choose 512KB-16B to ensure that the resulting byte[] is not a humongous allocation in G1. public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512 * 1024 - 16, ByteSizeUnit.BYTES); @@ -193,6 +202,7 @@ public class RecoverySettings { private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + private volatile TimeValue internalRemoteUploadTimeout; public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); @@ -216,6 +226,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { } logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec); + this.internalRemoteUploadTimeout = INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); @@ -237,6 +248,8 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, this::setMinRemoteSegmentMetadataFiles ); + clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); + } public RateLimiter rateLimiter() { @@ -267,6 +280,10 @@ public TimeValue internalActionLongTimeout() { return internalActionLongTimeout; } + public TimeValue internalRemoteUploadTimeout() { + return internalRemoteUploadTimeout; + } + public ByteSizeValue getChunkSize() { return chunkSize; } @@ -298,6 +315,10 @@ public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { this.internalActionLongTimeout = internalActionLongTimeout; } + public void setInternalRemoteUploadTimeout(TimeValue internalRemoteUploadTimeout) { + this.internalRemoteUploadTimeout = internalRemoteUploadTimeout; + } + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { this.maxBytesPerSec = maxBytesPerSec; if (maxBytesPerSec.getBytes() <= 0) { diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 02fc8feefd698..a17779810239a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ 
b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -8,16 +8,14 @@ package org.opensearch.indices.replication; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.action.ActionListener; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; @@ -35,9 +33,7 @@ */ public class PrimaryShardReplicationSource implements SegmentReplicationSource { - private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); - - private final RetryableTransportClient transportClient; + private final TransportService transportService; private final DiscoveryNode sourceNode; private final DiscoveryNode targetNode; @@ -52,12 +48,7 @@ public PrimaryShardReplicationSource( DiscoveryNode sourceNode ) { this.targetAllocationId = targetAllocationId; - this.transportClient = new RetryableTransportClient( - transportService, - sourceNode, - recoverySettings.internalActionRetryTimeout(), - logger - ); + this.transportService = transportService; this.sourceNode = sourceNode; this.targetNode = targetNode; this.recoverySettings = recoverySettings; @@ -69,10 +60,14 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - final Writeable.Reader<CheckpointInfoResponse> reader = CheckpointInfoResponse::new; - final ActionListener<CheckpointInfoResponse> responseListener = ActionListener.map(listener, r -> r); final CheckpointInfoRequest request = new CheckpointInfoRequest(replicationId, targetAllocationId, targetNode, checkpoint); - transportClient.executeRetryableAction(GET_CHECKPOINT_INFO, request, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_CHECKPOINT_INFO, + request, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionRetryTimeout()).build(), + new ActionListenerResponseHandler<>(listener, CheckpointInfoResponse::new, ThreadPool.Names.GENERIC) + ); } @Override @@ -88,8 +83,6 @@ public void getSegmentFiles( // MultiFileWriter takes care of progress tracking for downloads in this scenario // TODO: Move state management and tracking into replication methods and use chunking and data // copy mechanisms only from MultiFileWriter - final Writeable.Reader<GetSegmentFilesResponse> reader = GetSegmentFilesResponse::new; - final ActionListener<GetSegmentFilesResponse> responseListener = ActionListener.map(listener, r -> r); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( replicationId, targetAllocationId, @@ -97,20 +90,17 @@ public void getSegmentFiles( filesToFetch, checkpoint ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); - transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_SEGMENT_FILES, + 
request, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), + new ActionListenerResponseHandler<>(listener, GetSegmentFilesResponse::new, ThreadPool.Names.GENERIC) + ); } @Override public String getDescription() { return sourceNode.getName(); } - - @Override - public void cancel() { - transportClient.cancel(); - } - } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index cc71ef816e525..af764556b7549 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -83,6 +83,16 @@ protected void closeInternal() { } } + @Override + protected void onCancel(String reason) { + try { + notifyListener(new ReplicationFailedException(reason), false); + } finally { + source.cancel(); + cancellableThreads.cancel(reason); + } + } + @Override protected String getPrefix() { return REPLICATION_PREFIX + UUIDs.randomBase64UUID() + "."; @@ -320,16 +330,4 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) } } } - - /** - * Trigger a cancellation, this method will not close the target a subsequent call to #fail is required from target service. - */ - @Override - public void cancel(String reason) { - if (finished.get() == false) { - logger.trace(new ParameterizedMessage("Cancelling replication for target {}", description())); - cancellableThreads.cancel(reason); - source.cancel(); - } - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index d6db154a4e0e3..f28f829545d59 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -84,10 +84,6 @@ public class SegmentReplicationTargetService extends AbstractLifecycleComponent private final ClusterService clusterService; private final TransportService transportService; - public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { - return onGoingReplications.get(replicationId); - } - /** * The internal actions * @@ -158,6 +154,7 @@ protected void doStart() { @Override protected void doStop() { if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + assert onGoingReplications.size() == 0 : "Replication collection should be empty on shutdown"; clusterService.removeListener(this); } } @@ -201,7 +198,7 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { - onGoingReplications.requestCancel(indexShard.shardId(), "Shard closing"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard closing"); latestReceivedCheckpoint.remove(shardId); } } @@ -223,7 +220,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { if (oldRouting != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { - 
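The PrimaryShardReplicationSource above now issues a single transport call bounded by a timeout from TransportRequestOptions, instead of wrapping the call in a retrying client. The same one-attempt-with-deadline shape, expressed with CompletableFuture purely as a stand-in for the transport and listener plumbing (the action name below is only a label):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

// One attempt, bounded by a deadline, completion delivered asynchronously:
// the shape the hunk above moves to after dropping RetryableTransportClient.
class OneShotRequestDemo {
    static CompletableFuture<String> send(String request, long timeoutMillis) {
        return CompletableFuture
            .supplyAsync(() -> "response-to-" + request)        // pretend remote call
            .orTimeout(timeoutMillis, TimeUnit.MILLISECONDS);   // fail, don't retry, on expiry
    }

    public static void main(String[] args) throws Exception {
        send("GET_CHECKPOINT_INFO", 1_000)
            .whenComplete((r, e) -> System.out.println(e == null ? r : "failed: " + e));
        Thread.sleep(100); // let the async callback print before exiting
    }
}
```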
onGoingReplications.requestCancel(indexShard.shardId(), "Shard has been promoted to primary"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard has been promoted to primary"); latestReceivedCheckpoint.remove(indexShard.shardId()); } } @@ -255,6 +252,14 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { .orElseGet(() -> getlatestCompletedEventSegmentReplicationState(shardId)); } + public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { + return onGoingReplications.get(replicationId); + } + + public SegmentReplicationTarget get(ShardId shardId) { + return onGoingReplications.getOngoingReplicationTarget(shardId); + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. @@ -454,7 +459,13 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa latestPublishedCheckpoint ) ); - Runnable runnable = () -> onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + Runnable runnable = () -> { + // if we retry ensure the shard is not in the process of being closed. + // it will be removed from indexService's collection before the shard is actually marked as closed. + if (indicesService.getShardOrNull(replicaShard.shardId()) != null) { + onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + } + }; // Checks if we are using same thread and forks if necessary. if (thread == Thread.currentThread()) { threadPool.generic().execute(runnable); @@ -548,9 +559,6 @@ public ReplicationRunner(long replicationId) { @Override public void onFailure(Exception e) { - try (final ReplicationRef<SegmentReplicationTarget> ref = onGoingReplications.get(replicationId)) { - logger.error(() -> new ParameterizedMessage("Error during segment replication, {}", ref.get().description()), e); - } onGoingReplications.fail(replicationId, new ReplicationFailedException("Unexpected Error during replication", e), false); } diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 5a3c1038cd5f0..eeee5d8a409aa 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.indices.store; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; @@ -42,40 +41,29 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import 
org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.gateway.AsyncShardFetch; -import org.opensearch.index.IndexService; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.seqno.ReplicationTracker; -import org.opensearch.index.seqno.RetentionLease; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.store.Store; -import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.TimeUnit; + +import static org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.listShardMetadataInternal; /** * Metadata for shard stores from a list of transport nodes @@ -167,166 +155,7 @@ protected NodeStoreFilesMetadata nodeOperation(NodeRequest request) { private StoreFilesMetadata listStoreMetadata(NodeRequest request) throws IOException { final ShardId shardId = request.getShardId(); - logger.trace("listing store meta data for {}", shardId); - long startTimeNS = System.nanoTime(); - boolean exists = false; - try { - IndexService indexService = indicesService.indexService(shardId.getIndex()); - if (indexService != null) { - IndexShard indexShard = indexService.getShardOrNull(shardId.id()); - if (indexShard != null) { - try { - final StoreFilesMetadata storeFilesMetadata = new StoreFilesMetadata( - shardId, - indexShard.snapshotStoreMetadata(), - indexShard.getPeerRecoveryRetentionLeases() - ); - exists = true; - return storeFilesMetadata; - } catch (org.apache.lucene.index.IndexNotFoundException e) { - logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e); - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } catch (IOException e) { - logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e); - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } - } - } - final String customDataPath; - if (request.getCustomDataPath() != null) { - customDataPath = request.getCustomDataPath(); - } else { - // TODO: Fallback for BWC with older predecessor (ES) versions. 
- // Remove this once request.getCustomDataPath() always returns non-null - if (indexService != null) { - customDataPath = indexService.getIndexSettings().customDataPath(); - } else { - IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); - if (metadata != null) { - customDataPath = new IndexSettings(metadata, settings).customDataPath(); - } else { - logger.trace("{} node doesn't have meta data for the requests index", shardId); - throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); - } - } - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); - if (shardPath == null) { - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } - // note that this may fail if it can't get access to the shard lock. Since we check above there is an active shard, this means: - // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica - // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the cluster-manager may not - // reuse local resources. - final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot( - shardPath.resolveIndex(), - shardId, - nodeEnv::shardLock, - logger - ); - // We use peer recovery retention leases from the primary for allocating replicas. We should always have retention leases when - // we refresh shard info after the primary has started. Hence, we can ignore retention leases if there is no active shard. - return new StoreFilesMetadata(shardId, metadataSnapshot, Collections.emptyList()); - } finally { - TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); - if (exists) { - logger.debug("{} loaded store meta data (took [{}])", shardId, took); - } else { - logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took); - } - } - } - - /** - * Metadata for store files - * - * @opensearch.internal - */ - public static class StoreFilesMetadata implements Iterable<StoreFileMetadata>, Writeable { - private final ShardId shardId; - private final Store.MetadataSnapshot metadataSnapshot; - private final List<RetentionLease> peerRecoveryRetentionLeases; - - public StoreFilesMetadata( - ShardId shardId, - Store.MetadataSnapshot metadataSnapshot, - List<RetentionLease> peerRecoveryRetentionLeases - ) { - this.shardId = shardId; - this.metadataSnapshot = metadataSnapshot; - this.peerRecoveryRetentionLeases = peerRecoveryRetentionLeases; - } - - public StoreFilesMetadata(StreamInput in) throws IOException { - this.shardId = new ShardId(in); - this.metadataSnapshot = new Store.MetadataSnapshot(in); - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardId.writeTo(out); - metadataSnapshot.writeTo(out); - out.writeList(peerRecoveryRetentionLeases); - } - - public ShardId shardId() { - return this.shardId; - } - - public boolean isEmpty() { - return metadataSnapshot.size() == 0; - } - - @Override - public Iterator<StoreFileMetadata> iterator() { - return metadataSnapshot.iterator(); - } - - public boolean fileExists(String name) { - return metadataSnapshot.asMap().containsKey(name); - } - - public StoreFileMetadata file(String name) { - return metadataSnapshot.asMap().get(name); - } - - /** - * Returns the retaining sequence number of the peer recovery retention lease for a given node 
if exists; otherwise, returns -1. - */ - public long getPeerRecoveryRetentionLeaseRetainingSeqNo(DiscoveryNode node) { - assert node != null; - final String retentionLeaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()); - return peerRecoveryRetentionLeases.stream() - .filter(lease -> lease.id().equals(retentionLeaseId)) - .mapToLong(RetentionLease::retainingSequenceNumber) - .findFirst() - .orElse(-1L); - } - - public List<RetentionLease> peerRecoveryRetentionLeases() { - return peerRecoveryRetentionLeases; - } - - /** - * @return commit sync id if exists, else null - */ - public String syncId() { - return metadataSnapshot.getSyncId(); - } - - @Override - public String toString() { - return "StoreFilesMetadata{" - + ", shardId=" - + shardId - + ", metadataSnapshot{size=" - + metadataSnapshot.size() - + ", syncId=" - + metadataSnapshot.getSyncId() - + "}" - + '}'; - } + return listShardMetadataInternal(logger, shardId, nodeEnv, indicesService, request.getCustomDataPath(), settings, clusterService); } /** diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java new file mode 100644 index 0000000000000..3f151fe1c5ca0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java @@ -0,0 +1,346 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.store; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionType; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.AsyncShardFetch; +import org.opensearch.index.store.Store; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.INDEX_NOT_FOUND; + +/** + * Transport action for fetching the batch of shard stores Metadata from a list of transport nodes + * + * @opensearch.internal + */ +public class TransportNodesListShardStoreMetadataBatch extends TransportNodesAction< + 
+    TransportNodesListShardStoreMetadataBatch.Request,
+    TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch,
+    TransportNodesListShardStoreMetadataBatch.NodeRequest,
+    TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch>
+    implements
+    AsyncShardFetch.Lister<
+        TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch,
+        TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> {
+
+    public static final String ACTION_NAME = "internal:cluster/nodes/indices/shard/store/batch";
+    public static final ActionType<TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch> TYPE = new ActionType<>(
+        ACTION_NAME,
+        TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch::new
+    );
+
+    private final Settings settings;
+    private final IndicesService indicesService;
+    private final NodeEnvironment nodeEnv;
+
+    @Inject
+    public TransportNodesListShardStoreMetadataBatch(
+        Settings settings,
+        ThreadPool threadPool,
+        ClusterService clusterService,
+        TransportService transportService,
+        IndicesService indicesService,
+        NodeEnvironment nodeEnv,
+        ActionFilters actionFilters
+    ) {
+        super(
+            ACTION_NAME,
+            threadPool,
+            clusterService,
+            transportService,
+            actionFilters,
+            Request::new,
+            NodeRequest::new,
+            ThreadPool.Names.FETCH_SHARD_STORE,
+            NodeStoreFilesMetadataBatch.class
+        );
+        this.settings = settings;
+        this.indicesService = indicesService;
+        this.nodeEnv = nodeEnv;
+    }
+
+    @Override
+    public void list(
+        Map<ShardId, ShardAttributes> shardAttributes,
+        DiscoveryNode[] nodes,
+        ActionListener<NodesStoreFilesMetadataBatch> listener
+    ) {
+        execute(new TransportNodesListShardStoreMetadataBatch.Request(shardAttributes, nodes), listener);
+    }
+
+    @Override
+    protected NodeRequest newNodeRequest(Request request) {
+        return new NodeRequest(request);
+    }
+
+    @Override
+    protected NodeStoreFilesMetadataBatch newNodeResponse(StreamInput in) throws IOException {
+        return new NodeStoreFilesMetadataBatch(in);
+    }
+
+    @Override
+    protected NodesStoreFilesMetadataBatch newResponse(
+        Request request,
+        List<NodeStoreFilesMetadataBatch> responses,
+        List<FailedNodeException> failures
+    ) {
+        return new NodesStoreFilesMetadataBatch(clusterService.getClusterName(), responses, failures);
+    }
+
+    @Override
+    protected NodeStoreFilesMetadataBatch nodeOperation(NodeRequest request) {
+        try {
+            return new NodeStoreFilesMetadataBatch(clusterService.localNode(), listStoreMetadata(request));
+        } catch (IOException e) {
+            throw new OpenSearchException(
+                "Failed to list store metadata for shards [" + request.getShardAttributes().keySet() + "]",
+                e
+            );
+        }
+    }
+
+    /**
+     * This method is similar to the listStoreMetadata method of {@link TransportNodesListShardStoreMetadata}.
+     * In this case we fetch the shard store files for a batch of shards instead of a single shard.
+     */
+    private Map<ShardId, NodeStoreFilesMetadata> listStoreMetadata(NodeRequest request) throws IOException {
+        Map<ShardId, NodeStoreFilesMetadata> shardStoreMetadataMap = new HashMap<ShardId, NodeStoreFilesMetadata>();
+        for (Map.Entry<ShardId, ShardAttributes> shardAttributes : request.getShardAttributes().entrySet()) {
+            final ShardId shardId = shardAttributes.getKey();
+            try {
+                StoreFilesMetadata storeFilesMetadata = TransportNodesListShardStoreMetadataHelper.listShardMetadataInternal(
+                    logger,
+                    shardId,
+                    nodeEnv,
+                    indicesService,
+                    shardAttributes.getValue().getCustomDataPath(),
+                    settings,
+                    clusterService
+                );
+                shardStoreMetadataMap.put(shardId, new NodeStoreFilesMetadata(storeFilesMetadata, null));
+            } catch (Exception e) {
+                // store a null entry for the known exceptions thrown by the listShardMetadataInternal method
+                if (e.getMessage().contains(INDEX_NOT_FOUND)) {
+                    shardStoreMetadataMap.put(shardId, null);
+                } else {
+                    // for unknown exceptions, keep the actual exception and return it as is
+                    shardStoreMetadataMap.put(
+                        shardId,
+                        new NodeStoreFilesMetadata(
+                            new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()),
+                            e
+                        )
+                    );
+                }
+            }
+        }
+        return shardStoreMetadataMap;
+    }
+
+    /**
+     * Request is used to construct the transport request sent to a set of other nodes.
+     * Refer to the start method of the {@link TransportNodesAction} class.
+     *
+     * @opensearch.internal
+     */
+    public static class Request extends BaseNodesRequest<Request> {
+
+        private final Map<ShardId, ShardAttributes> shardAttributes;
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            shardAttributes = in.readMap(ShardId::new, ShardAttributes::new);
+        }
+
+        public Request(Map<ShardId, ShardAttributes> shardAttributes, DiscoveryNode[] nodes) {
+            super(nodes);
+            this.shardAttributes = Objects.requireNonNull(shardAttributes);
+        }
+
+        public Map<ShardId, ShardAttributes> getShardAttributes() {
+            return shardAttributes;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+        }
+    }
+
+    /**
+     * Metadata for the nodes store files
+     *
+     * @opensearch.internal
+     */
+    public static class NodesStoreFilesMetadataBatch extends BaseNodesResponse<NodeStoreFilesMetadataBatch> {
+
+        public NodesStoreFilesMetadataBatch(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        public NodesStoreFilesMetadataBatch(
+            ClusterName clusterName,
+            List<NodeStoreFilesMetadataBatch> nodes,
+            List<FailedNodeException> failures
+        ) {
+            super(clusterName, nodes, failures);
+        }
+
+        @Override
+        protected List<NodeStoreFilesMetadataBatch> readNodesFrom(StreamInput in) throws IOException {
+            return in.readList(NodeStoreFilesMetadataBatch::new);
+        }
+
+        @Override
+        protected void writeNodesTo(StreamOutput out, List<NodeStoreFilesMetadataBatch> nodes) throws IOException {
+            out.writeList(nodes);
+        }
+    }
+
+    /**
+     * The metadata for the node store files
+     *
+     * @opensearch.internal
+     */
+    public static class NodeStoreFilesMetadata {
+
+        private StoreFilesMetadata storeFilesMetadata;
+        private Exception storeFileFetchException;
+
+        public NodeStoreFilesMetadata(StoreFilesMetadata storeFilesMetadata) {
+            this.storeFilesMetadata = storeFilesMetadata;
+            this.storeFileFetchException = null;
+        }
+
+        public NodeStoreFilesMetadata(StreamInput in) throws IOException {
+            storeFilesMetadata = new StoreFilesMetadata(in);
+            if (in.readBoolean()) {
+                this.storeFileFetchException = in.readException();
+            } else {
+                this.storeFileFetchException = null;
+            }
+        }
+
+        public NodeStoreFilesMetadata(StoreFilesMetadata storeFilesMetadata, Exception storeFileFetchException) {
+            this.storeFilesMetadata = storeFilesMetadata;
+            this.storeFileFetchException = storeFileFetchException;
+        }
+
+        public StoreFilesMetadata storeFilesMetadata() {
+            return storeFilesMetadata;
+        }
+
+        public void writeTo(StreamOutput out) throws IOException {
+            storeFilesMetadata.writeTo(out);
+            if (storeFileFetchException != null) {
+                out.writeBoolean(true);
+                out.writeException(storeFileFetchException);
+            } else {
+                out.writeBoolean(false);
+            }
+        }
+
+        public Exception getStoreFileFetchException() {
+            return storeFileFetchException;
+        }
+
+        @Override
+        public String toString() {
+            return "[[" + storeFilesMetadata + "]]";
+        }
+    }
+
+    /**
+     * NodeRequest class is for deserializing the request received by this node from another node for this transport action.
+     * This is used in {@link TransportNodesAction}
+     * @opensearch.internal
+     */
+    public static class NodeRequest extends TransportRequest {
+
+        private final Map<ShardId, ShardAttributes> shardAttributes;
+
+        public NodeRequest(StreamInput in) throws IOException {
+            super(in);
+            shardAttributes = in.readMap(ShardId::new, ShardAttributes::new);
+        }
+
+        public NodeRequest(Request request) {
+            this.shardAttributes = Objects.requireNonNull(request.getShardAttributes());
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+        }
+
+        public Map<ShardId, ShardAttributes> getShardAttributes() {
+            return shardAttributes;
+        }
+    }
+
+    /**
+     * NodeStoreFilesMetadataBatch is the response received by this node from another node for this transport action.
+     * Refer to {@link TransportNodesAction}
+     */
+    public static class NodeStoreFilesMetadataBatch extends BaseNodeResponse {
+        private final Map<ShardId, NodeStoreFilesMetadata> nodeStoreFilesMetadataBatch;
+
+        protected NodeStoreFilesMetadataBatch(StreamInput in) throws IOException {
+            super(in);
+            this.nodeStoreFilesMetadataBatch = in.readMap(ShardId::new, NodeStoreFilesMetadata::new);
+        }
+
+        public NodeStoreFilesMetadataBatch(DiscoveryNode node, Map<ShardId, NodeStoreFilesMetadata> nodeStoreFilesMetadataBatch) {
+            super(node);
+            this.nodeStoreFilesMetadataBatch = nodeStoreFilesMetadataBatch;
+        }
+
+        public Map<ShardId, NodeStoreFilesMetadata> getNodeStoreFilesMetadataBatch() {
+            return this.nodeStoreFilesMetadataBatch;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeMap(nodeStoreFilesMetadataBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+        }
+    }
+
+}
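A per-node batch response therefore carries one of three outcomes per shard. A minimal consumer-side sketch of how a fetcher might interpret them (handleNodeResponse is hypothetical and not part of this change; it only uses accessors defined above):

    // Hypothetical consumer: interprets one node's batch response.
    void handleNodeResponse(Map<ShardId, NodeStoreFilesMetadata> batch) {
        for (Map.Entry<ShardId, NodeStoreFilesMetadata> entry : batch.entrySet()) {
            NodeStoreFilesMetadata value = entry.getValue();
            if (value == null) {
                // listStoreMetadata stored null: the node hit the known
                // INDEX_NOT_FOUND case, so the shard is simply absent there.
            } else if (value.getStoreFileFetchException() != null) {
                // Unknown failure: storeFilesMetadata() is an empty placeholder
                // and the original exception is attached for logging/retry decisions.
            } else {
                // Usable store files metadata for allocation decisions.
                StoreFilesMetadata metadata = value.storeFilesMetadata();
            }
        }
    }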
diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java
new file mode 100644
index 0000000000000..74b04d6c6d494
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java
@@ -0,0 +1,221 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.store;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.OpenSearchException;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.env.NodeEnvironment;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.seqno.ReplicationTracker;
+import org.opensearch.index.seqno.RetentionLease;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.ShardPath;
+import org.opensearch.index.store.Store;
+import org.opensearch.index.store.StoreFileMetadata;
+import org.opensearch.indices.IndicesService;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class has the common code used in {@link TransportNodesListShardStoreMetadata} and
+ * {@link TransportNodesListShardStoreMetadataBatch} to get the shard info on the local node.
+ * <p>
+ * This class should not be used to add more functions and will be removed when
+ * {@link TransportNodesListShardStoreMetadata} is deprecated and all the code is moved to
+ * {@link TransportNodesListShardStoreMetadataBatch}
+ *
+ * @opensearch.internal
+ */
+public class TransportNodesListShardStoreMetadataHelper {
+
+    public static final String INDEX_NOT_FOUND = "node doesn't have meta data for index ";
+
+    public static StoreFilesMetadata listShardMetadataInternal(
+        Logger logger,
+        final ShardId shardId,
+        NodeEnvironment nodeEnv,
+        IndicesService indicesService,
+        String customDataPath,
+        Settings settings,
+        ClusterService clusterService
+    ) throws IOException {
+        logger.trace("listing store meta data for {}", shardId);
+        long startTimeNS = System.nanoTime();
+        boolean exists = false;
+        try {
+            IndexService indexService = indicesService.indexService(shardId.getIndex());
+            if (indexService != null) {
+                IndexShard indexShard = indexService.getShardOrNull(shardId.id());
+                if (indexShard != null) {
+                    try {
+                        final StoreFilesMetadata storeFilesMetadata = new StoreFilesMetadata(
+                            shardId,
+                            indexShard.snapshotStoreMetadata(),
+                            indexShard.getPeerRecoveryRetentionLeases()
+                        );
+                        exists = true;
+                        return storeFilesMetadata;
+                    } catch (org.apache.lucene.index.IndexNotFoundException e) {
+                        logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e);
+                        return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList());
+                    } catch (IOException e) {
+                        logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e);
+                        return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList());
+                    }
+                }
+            }
+            if (customDataPath == null) {
+                // TODO: Fallback for BWC with older predecessor (ES) versions.
+                // Remove this once request.getCustomDataPath() always returns non-null
+                if (indexService != null) {
+                    customDataPath = indexService.getIndexSettings().customDataPath();
+                } else {
+                    IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex());
+                    if (metadata != null) {
+                        customDataPath = new IndexSettings(metadata, settings).customDataPath();
+                    } else {
+                        logger.trace("{} node doesn't have metadata for the request's index", shardId);
+                        throw new OpenSearchException(INDEX_NOT_FOUND + shardId.getIndex());
+                    }
+                }
+            }
+            final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath);
+            if (shardPath == null) {
+                return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList());
+            }
+            // note that this may fail if it can't get access to the shard lock. Since we check above that there is no active shard, this means:
+            // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica
+            // 2) a shard is shutting down and has not cleared its content within the lock timeout. In this case the cluster-manager
+            //    may not reuse local resources.
+            final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot(
+                shardPath.resolveIndex(),
+                shardId,
+                nodeEnv::shardLock,
+                logger
+            );
+            // We use peer recovery retention leases from the primary for allocating replicas. We should always have retention leases when
+            // we refresh shard info after the primary has started. Hence, we can ignore retention leases if there is no active shard.
+            return new StoreFilesMetadata(shardId, metadataSnapshot, Collections.emptyList());
+        } finally {
+            TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS);
+            if (exists) {
+                logger.debug("{} loaded store meta data (took [{}])", shardId, took);
+            } else {
+                logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took);
+            }
+        }
+    }
+
+    /**
+     * Metadata for store files
+     *
+     * @opensearch.internal
+     */
+    public static class StoreFilesMetadata implements Iterable<StoreFileMetadata>, Writeable {
+        private final ShardId shardId;
+        private final Store.MetadataSnapshot metadataSnapshot;
+        private final List<RetentionLease> peerRecoveryRetentionLeases;
+
+        public StoreFilesMetadata(
+            ShardId shardId,
+            Store.MetadataSnapshot metadataSnapshot,
+            List<RetentionLease> peerRecoveryRetentionLeases
+        ) {
+            this.shardId = shardId;
+            this.metadataSnapshot = metadataSnapshot;
+            this.peerRecoveryRetentionLeases = peerRecoveryRetentionLeases;
+        }
+
+        public StoreFilesMetadata(StreamInput in) throws IOException {
+            this.shardId = new ShardId(in);
+            this.metadataSnapshot = new Store.MetadataSnapshot(in);
+            this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            shardId.writeTo(out);
+            metadataSnapshot.writeTo(out);
+            out.writeList(peerRecoveryRetentionLeases);
+        }
+
+        public ShardId shardId() {
+            return this.shardId;
+        }
+
+        public boolean isEmpty() {
+            return metadataSnapshot.size() == 0;
+        }
+
+        @Override
+        public Iterator<StoreFileMetadata> iterator() {
+            return metadataSnapshot.iterator();
+        }
+
+        public boolean fileExists(String name) {
+            return metadataSnapshot.asMap().containsKey(name);
+        }
+
+        public StoreFileMetadata file(String name) {
+            return metadataSnapshot.asMap().get(name);
+        }
+
+        /**
+         * Returns the retaining sequence number of the peer recovery retention lease for a given node if one exists; otherwise,
returns -1.
+         */
+        public long getPeerRecoveryRetentionLeaseRetainingSeqNo(DiscoveryNode node) {
+            assert node != null;
+            final String retentionLeaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId());
+            return peerRecoveryRetentionLeases.stream()
+                .filter(lease -> lease.id().equals(retentionLeaseId))
+                .mapToLong(RetentionLease::retainingSequenceNumber)
+                .findFirst()
+                .orElse(-1L);
+        }
+
+        public List<RetentionLease> peerRecoveryRetentionLeases() {
+            return peerRecoveryRetentionLeases;
+        }
+
+        /**
+         * @return commit sync id if it exists, else null
+         */
+        public String syncId() {
+            return metadataSnapshot.getSyncId();
+        }
+
+        @Override
+        public String toString() {
+            return "StoreFilesMetadata{"
+                + "shardId="
+                + shardId
+                + ", metadataSnapshot{size="
+                + metadataSnapshot.size()
+                + ", syncId="
+                + metadataSnapshot.getSyncId()
+                + "}"
+                + '}';
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java
index fc1bb86f2ad3e..ddff022112665 100644
--- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java
+++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java
@@ -448,9 +448,13 @@ public long ioTimeInMillis() {
         return (currentIOTime - previousIOTime);
     }
 
+    public String getDeviceName() {
+        return deviceName;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field("device_name", deviceName);
+        builder.field("device_name", getDeviceName());
         builder.field(IoStats.OPERATIONS, operations());
         builder.field(IoStats.READ_OPERATIONS, readOperations());
         builder.field(IoStats.WRITE_OPERATIONS, writeOperations());
diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
index dc27ab0fb91c2..e3290bfec6905 100644
--- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
+++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
@@ -216,6 +216,7 @@ private static boolean usingBundledJdkOrJre() {
         }
     }
 
+    @SuppressWarnings("removal")
     public static JvmInfo jvmInfo() {
         SecurityManager sm = System.getSecurityManager();
         if (sm != null) {
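The IoUsageStats class added in the next file reaches other nodes inside NodeResourceUsageStats behind a version gate (see that class's changes later in this diff), so pre-2.13.0 peers never see the field. A condensed sketch of the read/write halves of that pattern, mirroring the NodeResourceUsageStats changes below rather than introducing anything new:

    // Write side: serialize the optional field only to peers that understand it.
    if (out.getVersion().onOrAfter(Version.V_2_13_0)) {
        out.writeOptionalWriteable(ioUsageStats);   // may be null
    }

    // Read side: older peers simply never send it.
    this.ioUsageStats = in.getVersion().onOrAfter(Version.V_2_13_0)
        ? in.readOptionalWriteable(IoUsageStats::new)
        : null;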
diff --git a/server/src/main/java/org/opensearch/node/IoUsageStats.java b/server/src/main/java/org/opensearch/node/IoUsageStats.java
new file mode 100644
index 0000000000000..ecb1ac1bb1de4
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/IoUsageStats.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Locale;
+
+/**
+ * This class stores the IO usage stats and is used to return them in the node stats API.
+ */
+public class IoUsageStats implements Writeable, ToXContentFragment {
+
+    private double ioUtilisationPercent;
+
+    public IoUsageStats(double ioUtilisationPercent) {
+        this.ioUtilisationPercent = ioUtilisationPercent;
+    }
+
+    /**
+     *
+     * @param in the stream to read from
+     * @throws IOException if an error occurs while reading from the StreamInput
+     */
+    public IoUsageStats(StreamInput in) throws IOException {
+        this.ioUtilisationPercent = in.readDouble();
+    }
+
+    /**
+     * Write this into the {@linkplain StreamOutput}.
+     *
+     * @param out the output stream to write entity content to
+     */
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeDouble(this.ioUtilisationPercent);
+    }
+
+    public double getIoUtilisationPercent() {
+        return ioUtilisationPercent;
+    }
+
+    public void setIoUtilisationPercent(double ioUtilisationPercent) {
+        this.ioUtilisationPercent = ioUtilisationPercent;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field("max_io_utilization_percent", String.format(Locale.ROOT, "%.1f", this.ioUtilisationPercent));
+        return builder.endObject();
+    }
+
+    @Override
+    public String toString() {
+        return "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", this.ioUtilisationPercent);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index 8510122c39fcb..3ef3ae4f6230e 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -44,6 +44,7 @@
 import org.opensearch.action.ActionModule.DynamicActionRegistry;
 import org.opensearch.action.ActionType;
 import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus;
+import org.opensearch.action.admin.indices.view.ViewService;
 import org.opensearch.action.search.SearchExecutionStatsCollector;
 import org.opensearch.action.search.SearchPhaseController;
 import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory;
@@ -83,6 +84,8 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.StopWatch;
+import org.opensearch.common.cache.module.CacheModule;
+import org.opensearch.common.cache.service.CacheService;
 import org.opensearch.common.inject.Injector;
 import org.opensearch.common.inject.Key;
 import org.opensearch.common.inject.Module;
@@ -178,6 +181,7 @@
 import org.opensearch.persistent.PersistentTasksService;
 import org.opensearch.plugins.ActionPlugin;
 import org.opensearch.plugins.AnalysisPlugin;
+import org.opensearch.plugins.CachePlugin;
 import org.opensearch.plugins.CircuitBreakerPlugin;
 import org.opensearch.plugins.ClusterPlugin;
 import org.opensearch.plugins.CryptoKeyProviderPlugin;
@@ -527,7 +531,11 @@ protected Node(
          */
         this.environment = new Environment(settings, initialEnvironment.configDir(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings));
         Environment.assertEquivalent(initialEnvironment, this.environment);
-        nodeEnvironment = new NodeEnvironment(tmpSettings, environment);
+        if (DiscoveryNode.isSearchNode(settings) == false) {
+            nodeEnvironment = new NodeEnvironment(tmpSettings, environment);
+        } else {
+            nodeEnvironment = new NodeEnvironment(settings, environment, new FileCacheCleaner(this::fileCache));
+        }
         logger.info(
             "node name [{}], node ID [{}], cluster name [{}], roles {}",
             NODE_NAME_SETTING.get(tmpSettings),
@@ -678,7 +686,6 @@
protected Node( ); // File cache will be initialized by the node once circuit breakers are in place. initializeFileCache(settings, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST)); - final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnvironment, fileCache); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, fileCache); pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { @@ -789,6 +796,8 @@ protected Node( final SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); + CacheModule cacheModule = new CacheModule(pluginsService.filterPlugins(CachePlugin.class), settings); + CacheService cacheService = cacheModule.getCacheService(); final IndicesService indicesService = new IndicesService( settings, pluginsService, @@ -812,10 +821,10 @@ protected Node( recoveryStateFactories, remoteDirectoryFactory, repositoriesServiceReference::get, - fileCacheCleaner, searchRequestStats, remoteStoreStatsTrackerFactory, - recoverySettings + recoverySettings, + cacheService ); final IngestService ingestService = new IngestService( @@ -862,6 +871,8 @@ protected Node( metadataCreateIndexService ); + final ViewService viewService = new ViewService(clusterService, client, null); + Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class) .stream() .flatMap( @@ -912,6 +923,7 @@ protected Node( final RestController restController = actionModule.getRestController(); final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + monitorService.fsService(), threadPool, settings, clusterService.getClusterSettings() @@ -1230,6 +1242,7 @@ protected Node( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(AwarenessReplicaBalance.class).toInstance(awarenessReplicaBalance); b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); + b.bind(ViewService.class).toInstance(viewService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class) diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java index 6ef66d4ac1914..26e53218cf026 100644 --- a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -8,6 +8,7 @@ package org.opensearch.node; +import org.opensearch.Version; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -24,12 +25,20 @@ public class NodeResourceUsageStats implements Writeable { long timestamp; double cpuUtilizationPercent; double memoryUtilizationPercent; + private IoUsageStats ioUsageStats; - public NodeResourceUsageStats(String nodeId, long timestamp, double memoryUtilizationPercent, double cpuUtilizationPercent) { + public NodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent, + IoUsageStats ioUsageStats + ) { this.nodeId = nodeId; this.timestamp = timestamp; this.cpuUtilizationPercent = cpuUtilizationPercent; this.memoryUtilizationPercent = memoryUtilizationPercent; + this.ioUsageStats = 
ioUsageStats; } public NodeResourceUsageStats(StreamInput in) throws IOException { @@ -37,6 +46,11 @@ public NodeResourceUsageStats(StreamInput in) throws IOException { this.timestamp = in.readLong(); this.cpuUtilizationPercent = in.readDouble(); this.memoryUtilizationPercent = in.readDouble(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + this.ioUsageStats = in.readOptionalWriteable(IoUsageStats::new); + } else { + this.ioUsageStats = null; + } } @Override @@ -45,6 +59,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(this.timestamp); out.writeDouble(this.cpuUtilizationPercent); out.writeDouble(this.memoryUtilizationPercent); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeOptionalWriteable(this.ioUsageStats); + } } @Override @@ -52,8 +69,11 @@ public String toString() { StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); sb.append(nodeId).append("]("); sb.append("Timestamp: ").append(timestamp); - sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", cpuUtilizationPercent)); - sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", memoryUtilizationPercent)); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", this.getCpuUtilizationPercent())); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", this.getMemoryUtilizationPercent())); + if (this.ioUsageStats != null) { + sb.append(", ").append(this.getIoUsageStats()); + } sb.append(")"); return sb.toString(); } @@ -63,7 +83,8 @@ public String toString() { nodeResourceUsageStats.nodeId, nodeResourceUsageStats.timestamp, nodeResourceUsageStats.memoryUtilizationPercent, - nodeResourceUsageStats.cpuUtilizationPercent + nodeResourceUsageStats.cpuUtilizationPercent, + nodeResourceUsageStats.ioUsageStats ); } @@ -75,6 +96,14 @@ public double getCpuUtilizationPercent() { return cpuUtilizationPercent; } + public IoUsageStats getIoUsageStats() { + return ioUsageStats; + } + + public void setIoUsageStats(IoUsageStats ioUsageStats) { + this.ioUsageStats = ioUsageStats; + } + public long getTimestamp() { return timestamp; } diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java index 3dff9a27f71a8..35c82c904ad1c 100644 --- a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -60,6 +60,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws "memory_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent) ); + if (resourceUsageStats.getIoUsageStats() != null) { + builder.field("io_usage_stats", resourceUsageStats.getIoUsageStats()); + } } builder.endObject(); } diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java index f1c763e09f147..ecd2a5615e1fe 100644 --- a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java +++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java @@ -78,14 +78,16 @@ public void collectNodeResourceUsageStats( String nodeId, long timestamp, double memoryUtilizationPercent, - double cpuUtilizationPercent + double cpuUtilizationPercent, + IoUsageStats ioUsageStats ) { nodeIdToResourceUsageStats.compute(nodeId, (id, 
resourceUsageStats) -> { if (resourceUsageStats == null) { - return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent); + return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent, ioUsageStats); } else { resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent; resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent; + resourceUsageStats.setIoUsageStats(ioUsageStats); resourceUsageStats.timestamp = timestamp; return resourceUsageStats; } @@ -129,7 +131,8 @@ private void collectLocalNodeResourceUsageStats() { clusterService.state().nodes().getLocalNodeId(), System.currentTimeMillis(), nodeResourceUsageTracker.getMemoryUtilizationPercent(), - nodeResourceUsageTracker.getCpuUtilizationPercent() + nodeResourceUsageTracker.getCpuUtilizationPercent(), + nodeResourceUsageTracker.getIoUsageStats() ); } } diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 7b2a6c34d3db6..7575c6ff5fb34 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -131,12 +131,8 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na } private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { + Set<String> repositoryNames = getValidatedRepositoryNames(node); List<RepositoryMetadata> repositoryMetadataList = new ArrayList<>(); - Set<String> repositoryNames = new HashSet<>(); - - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); - repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); for (String repositoryName : repositoryNames) { repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName)); @@ -145,12 +141,36 @@ private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { return new RepositoriesMetadata(repositoryMetadataList); } + private Set<String> getValidatedRepositoryNames(DiscoveryNode node) { + Set<String> repositoryNames = new HashSet<>(); + if (node.getAttributes().containsKey(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY) + || node.getAttributes().containsKey(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + } else if (node.getAttributes().containsKey(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)) { + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + } + return repositoryNames; + } + public static boolean isRemoteStoreAttributePresent(Settings settings) { return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false; } + public static boolean isRemoteDataAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + 
REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY).isEmpty() == false + || settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY).isEmpty() == false; + } + + public static boolean isRemoteClusterStateAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY) + .isEmpty() == false; + } + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { - return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteStoreAttributePresent(settings); + return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) + && isRemoteClusterStateAttributePresent(settings); } public RepositoriesMetadata getRepositoriesMetadata() { diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java index ca2413a057a6b..33b182dd3cc97 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -15,18 +15,22 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Setting; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryException; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Supplier; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; + /** * Contains all the method needed for a remote store backed node lifecycle. */ @@ -39,6 +43,33 @@ public class RemoteStoreNodeService { "remote_store.compatibility_mode", CompatibilityMode.STRICT.name(), CompatibilityMode::parseString, + value -> { + if (value == CompatibilityMode.MIXED + && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + " mixed mode is under an experimental feature and can be activated only by enabling " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options " + ); + } + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Direction> MIGRATION_DIRECTION_SETTING = new Setting<>( + "migration.direction", + Direction.NONE.name(), + Direction::parseString, + value -> { + if (value != Direction.NONE && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + " migration.direction is under an experimental feature and can be activated only by enabling " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options " + ); + } + }, Setting.Property.Dynamic, Setting.Property.NodeScope ); @@ -49,7 +80,8 @@ public class RemoteStoreNodeService { * @opensearch.internal */ public enum CompatibilityMode { - STRICT("strict"); + STRICT("strict"), + MIXED("mixed"); public final String mode; @@ -66,13 +98,38 @@ public static CompatibilityMode parseString(String compatibilityMode) { + compatibilityMode + "] compatibility mode is not supported. 
" + "supported modes are [" - + CompatibilityMode.values().toString() + + Arrays.toString(CompatibilityMode.values()) + "]" ); } } } + /** + * Migration Direction intended for docrep to remote store migration and vice versa + * + * @opensearch.internal + */ + public enum Direction { + REMOTE_STORE("remote_store"), + NONE("none"), + DOCREP("docrep"); + + public final String direction; + + Direction(String d) { + this.direction = d; + } + + public static Direction parseString(String direction) { + try { + return Direction.valueOf(direction.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("[" + direction + "] migration.direction is not supported."); + } + } + } + public RemoteStoreNodeService(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) { this.repositoriesService = repositoriesService; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java index f83a1b7f9fc05..69c7afc1d4b43 100644 --- a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java @@ -24,12 +24,12 @@ public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent { private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class); - private final ThreadPool threadPool; - private final TimeValue pollingInterval; + protected final ThreadPool threadPool; + protected final TimeValue pollingInterval; private TimeValue windowDuration; private final AtomicReference<MovingAverage> observations = new AtomicReference<>(); - private volatile Scheduler.Cancellable scheduledFuture; + protected volatile Scheduler.Cancellable scheduledFuture; public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java new file mode 100644 index 0000000000000..5472d4bda2326 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; +import org.opensearch.common.ValidationException; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsInfo.DeviceStats; +import org.opensearch.monitor.fs.FsService; +import org.opensearch.node.IoUsageStats; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Optional; + +/** + * AverageIoUsageTracker tracks the IO usage by polling the FS Stats for IO metrics every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). 
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java
new file mode 100644
index 0000000000000..5472d4bda2326
--- /dev/null
+++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java
@@ -0,0 +1,102 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.node.resource.tracker;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.Constants;
+import org.opensearch.common.ValidationException;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.monitor.fs.FsInfo.DeviceStats;
+import org.opensearch.monitor.fs.FsService;
+import org.opensearch.node.IoUsageStats;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Optional;
+
+/**
+ * AverageIoUsageTracker tracks the IO usage by polling the FS stats for IO metrics every pollingInterval
+ * and keeping track of the rolling average over a defined time window (windowDuration).
+ */
+public class AverageIoUsageTracker extends AbstractAverageUsageTracker {
+
+    private static final Logger LOGGER = LogManager.getLogger(AverageIoUsageTracker.class);
+    private final FsService fsService;
+    private final HashMap<String, Long> prevIoTimeDeviceMap;
+    private long prevTimeInMillis;
+    private IoUsageStats ioUsageStats;
+
+    public AverageIoUsageTracker(FsService fsService, ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) {
+        super(threadPool, pollingInterval, windowDuration);
+        this.fsService = fsService;
+        this.prevIoTimeDeviceMap = new HashMap<>();
+        this.prevTimeInMillis = -1;
+        this.ioUsageStats = null;
+    }
+
+    /**
+     * Get the current IO usage percentage, calculated from fs stats
+     */
+    @Override
+    public long getUsage() {
+        long usage = 0;
+        Optional<ValidationException> validationException = this.preValidateFsStats();
+        if (validationException != null && validationException.isPresent()) {
+            throw validationException.get();
+        }
+        // Currently, even with a RAID setup there is only one mounted device, and it reports 0 IO time from /proc/diskstats
+        DeviceStats[] devicesStats = fsService.stats().getIoStats().getDevicesStats();
+        long latestTimeInMillis = fsService.stats().getTimestamp();
+        for (DeviceStats devicesStat : devicesStats) {
+            long devicePreviousIoTime = prevIoTimeDeviceMap.getOrDefault(devicesStat.getDeviceName(), (long) -1);
+            long deviceCurrentIoTime = devicesStat.ioTimeInMillis();
+            if (prevTimeInMillis > 0 && (latestTimeInMillis - this.prevTimeInMillis > 0) && devicePreviousIoTime > 0) {
+                long absIoTime = (deviceCurrentIoTime - devicePreviousIoTime);
+                long deviceCurrentIoUsage = absIoTime * 100 / (latestTimeInMillis - this.prevTimeInMillis);
+                // Keep the maximum IO usage across all attached devices
+                usage = Math.max(usage, deviceCurrentIoUsage);
+            }
+            prevIoTimeDeviceMap.put(devicesStat.getDeviceName(), devicesStat.ioTimeInMillis());
+        }
+        this.prevTimeInMillis = latestTimeInMillis;
+        return usage;
+    }
+
+    @Override
+    protected void doStart() {
+        if (Constants.LINUX) {
+            this.ioUsageStats = new IoUsageStats(-1);
+            scheduledFuture = threadPool.scheduleWithFixedDelay(() -> {
+                long usage = getUsage();
+                recordUsage(usage);
+                updateIoUsageStats();
+            }, pollingInterval, ThreadPool.Names.GENERIC);
+        }
+    }
+
+    public Optional<ValidationException> preValidateFsStats() {
+        ValidationException validationException = new ValidationException();
+        if (fsService == null
+            || fsService.stats() == null
+            || fsService.stats().getIoStats() == null
+            || fsService.stats().getIoStats().getDevicesStats() == null) {
+            validationException.addValidationError("FsService IoStats or DeviceStats are missing");
+        }
+        return validationException.validationErrors().isEmpty() ? Optional.empty() : Optional.of(validationException);
+    }
+
+    private void updateIoUsageStats() {
+        this.ioUsageStats.setIoUtilisationPercent(this.isReady() ? this.getAverage() : -1);
+    }
+
+    public IoUsageStats getIoUsageStats() {
+        return this.ioUsageStats;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java
index cf5f38c1b004c..621f90e80454c 100644
--- a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java
+++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java
@@ -8,10 +8,13 @@
 package org.opensearch.node.resource.tracker;
 
+import org.apache.lucene.util.Constants;
 import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.monitor.fs.FsService;
+import org.opensearch.node.IoUsageStats;
 import org.opensearch.threadpool.ThreadPool;
 
 /**
@@ -22,10 +25,14 @@ public class NodeResourceUsageTracker extends AbstractLifecycleComponent {
     private final ClusterSettings clusterSettings;
     private AverageCpuUsageTracker cpuUsageTracker;
     private AverageMemoryUsageTracker memoryUsageTracker;
+    private AverageIoUsageTracker ioUsageTracker;
 
     private ResourceTrackerSettings resourceTrackerSettings;
 
-    public NodeResourceUsageTracker(ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) {
+    private final FsService fsService;
+
+    public NodeResourceUsageTracker(FsService fsService, ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) {
+        this.fsService = fsService;
         this.threadPool = threadPool;
         this.clusterSettings = clusterSettings;
         this.resourceTrackerSettings = new ResourceTrackerSettings(settings);
@@ -52,10 +59,20 @@ public double getMemoryUtilizationPercent() {
         return 0.0;
     }
 
+    /**
+     * Returns the tracked IO usage stats; the utilisation percent they carry stays at -1 until enough data points are collected
+     */
+    public IoUsageStats getIoUsageStats() {
+        return ioUsageTracker.getIoUsageStats();
+    }
+
     /**
      * Checks if all of the resource usage trackers are ready
      */
     public boolean isReady() {
+        if (Constants.LINUX) {
+            return memoryUsageTracker.isReady() && cpuUsageTracker.isReady() && ioUsageTracker.isReady();
+        }
         return memoryUsageTracker.isReady() && cpuUsageTracker.isReady();
     }
 
@@ -79,6 +96,17 @@ void initialize() {
             ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING,
             this::setMemoryWindowDuration
         );
+
+        ioUsageTracker = new AverageIoUsageTracker(
+            fsService,
+            threadPool,
+            resourceTrackerSettings.getIoPollingInterval(),
+            resourceTrackerSettings.getIoWindowDuration()
+        );
+        clusterSettings.addSettingsUpdateConsumer(
+            ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING,
+            this::setIoWindowDuration
+        );
     }
 
     private void setMemoryWindowDuration(TimeValue windowDuration) {
@@ -91,6 +119,11 @@ private void setCpuWindowDuration(TimeValue windowDuration) {
         resourceTrackerSettings.setCpuWindowDuration(windowDuration);
     }
 
+    private void setIoWindowDuration(TimeValue windowDuration) {
+        ioUsageTracker.setWindowSize(windowDuration);
+        resourceTrackerSettings.setIoWindowDuration(windowDuration);
+    }
+
     /**
      * Visible for testing
      */
@@ -102,17 +135,20 @@ ResourceTrackerSettings getResourceTrackerSettings() {
     protected void doStart() {
         cpuUsageTracker.doStart();
         memoryUsageTracker.doStart();
+        ioUsageTracker.doStart();
     }
 
     @Override
     protected void doStop() {
         cpuUsageTracker.doStop();
         memoryUsageTracker.doStop();
+        ioUsageTracker.doStop();
     }
 
     @Override
     protected void
doClose() { cpuUsageTracker.doClose(); memoryUsageTracker.doClose(); + ioUsageTracker.doClose(); } } diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java index f81b008ba7e8b..b423b92c8a4fb 100644 --- a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -26,6 +26,14 @@ private static class Defaults { * This is the default window duration on which the average resource utilization values will be calculated */ private static final long WINDOW_DURATION_IN_SECONDS = 30; + /** + * This is the default polling interval for IO usage tracker + */ + private static final long IO_POLLING_INTERVAL_IN_MILLIS = 5000; + /** + * This is the default window duration for IO usage tracker on which the average resource utilization values will be calculated + */ + private static final long IO_WINDOW_DURATION_IN_SECONDS = 120; } public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( @@ -40,6 +48,18 @@ private static class Defaults { Setting.Property.NodeScope ); + public static final Setting<TimeValue> GLOBAL_IO_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_io_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.IO_POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_io_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.IO_WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( "node.resource.tracker.global_jvmmp.polling_interval", TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), @@ -56,12 +76,16 @@ private static class Defaults { private volatile TimeValue cpuPollingInterval; private volatile TimeValue memoryWindowDuration; private volatile TimeValue memoryPollingInterval; + private volatile TimeValue ioWindowDuration; + private volatile TimeValue ioPollingInterval; public ResourceTrackerSettings(Settings settings) { this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.ioPollingInterval = GLOBAL_IO_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.ioWindowDuration = GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); } public TimeValue getCpuWindowDuration() { @@ -80,6 +104,14 @@ public TimeValue getMemoryWindowDuration() { return memoryWindowDuration; } + public TimeValue getIoPollingInterval() { + return ioPollingInterval; + } + + public TimeValue getIoWindowDuration() { + return ioWindowDuration; + } + public void setCpuWindowDuration(TimeValue cpuWindowDuration) { this.cpuWindowDuration = cpuWindowDuration; } @@ -87,4 +119,8 @@ public void setCpuWindowDuration(TimeValue cpuWindowDuration) { public void setMemoryWindowDuration(TimeValue memoryWindowDuration) { this.memoryWindowDuration = 
memoryWindowDuration;
     }
+
+    public void setIoWindowDuration(TimeValue ioWindowDuration) {
+        this.ioWindowDuration = ioWindowDuration;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/plugins/CachePlugin.java b/server/src/main/java/org/opensearch/plugins/CachePlugin.java
new file mode 100644
index 0000000000000..d962ed1db14bf
--- /dev/null
+++ b/server/src/main/java/org/opensearch/plugins/CachePlugin.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.cache.ICache;
+
+import java.util.Map;
+
+/**
+ * Plugin to extend cache related classes
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface CachePlugin {
+
+    /**
+     * Returns a map of cacheStoreType and a factory via which objects can be created on demand.
+     * For example:
+     * If there are two implementations of this plugin, let's say A and B, each may return the following,
+     * which can be aggregated by fetching all plugins.
+     *
+     * A: Map.of(DISK, new ADiskCache.Factory(),
+     *           ON_HEAP, new AOnHeapCache.Factory())
+     *
+     * B: Map.of(ON_HEAP, new BOnHeapCache.Factory())
+     *
+     * @return Map of cacheStoreType and an associated factory.
+     */
+    Map<String, ICache.Factory> getCacheFactoryMap();
+
+    String getName();
+}
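A minimal sketch of an implementation of this new extension point (the plugin class, the ExampleDiskCache factory, and the "example_disk" key are hypothetical, invented for illustration; only getCacheFactoryMap() and getName() come from the CachePlugin interface above):

    // Hypothetical plugin implementing the new CachePlugin SPI.
    public class ExampleCachePlugin extends Plugin implements CachePlugin {

        @Override
        public Map<String, ICache.Factory> getCacheFactoryMap() {
            // Keyed by cache store type so the CacheService can look up a factory on demand.
            return Map.of("example_disk", new ExampleDiskCache.Factory());
        }

        @Override
        public String getName() {
            return "example-cache-plugin";
        }
    }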
@@ -99,11 +111,41 @@ public PluginInfo( String customFolderName, List<String> extendedPlugins, boolean hasNativeController + ) { + this( + name, + description, + version, + List.of(SemverRange.fromString(opensearchVersion.toString())), + javaVersion, + classname, + customFolderName, + extendedPlugins, + hasNativeController + ); + } + + public PluginInfo( + String name, + String description, + String version, + List<SemverRange> opensearchVersionRanges, + String javaVersion, + String classname, + String customFolderName, + List<String> extendedPlugins, + boolean hasNativeController ) { this.name = name; this.description = description; this.version = version; - this.opensearchVersion = opensearchVersion; + // Ensure only one range is specified (for now) + if (opensearchVersionRanges.size() != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + this.opensearchVersionRanges = opensearchVersionRanges; this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; @@ -152,11 +194,16 @@ public PluginInfo( * @param in the stream * @throws IOException if an I/O exception occurred reading the plugin info from the stream */ + @SuppressWarnings("unchecked") public PluginInfo(final StreamInput in) throws IOException { this.name = in.readString(); this.description = in.readString(); this.version = in.readString(); - this.opensearchVersion = in.readVersion(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + this.opensearchVersionRanges = (List<SemverRange>) in.readGenericValue(); + } else { + this.opensearchVersionRanges = List.of(new SemverRange(in.readVersion(), SemverRange.RangeOperator.DEFAULT)); + } this.javaVersion = in.readString(); this.classname = in.readString(); this.customFolderName = in.readString(); @@ -169,7 +216,15 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(description); out.writeString(version); - out.writeVersion(opensearchVersion); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeGenericValue(opensearchVersionRanges); + } else { + /* + This works for currently supported range notations (=,~) + As more notations get added, then a suitable version must be picked. 
+ */ + out.writeVersion(opensearchVersionRanges.get(0).getRangeVersion()); + } out.writeString(javaVersion); out.writeString(classname); if (customFolderName != null) { @@ -214,10 +269,49 @@ public static PluginInfo readFromProperties(final Path path) throws IOException } final String opensearchVersionString = propsMap.remove("opensearch.version"); - if (opensearchVersionString == null) { - throw new IllegalArgumentException("property [opensearch.version] is missing for plugin [" + name + "]"); + final String dependenciesValue = propsMap.remove("dependencies"); + if (opensearchVersionString == null && dependenciesValue == null) { + throw new IllegalArgumentException( + "Either [opensearch.version] or [dependencies] property must be specified for the plugin [" + name + "]" + ); + } + if (opensearchVersionString != null && dependenciesValue != null) { + throw new IllegalArgumentException( + "Only one of [opensearch.version] or [dependencies] property can be specified for the plugin [" + name + "]" + ); + } + + final List<SemverRange> opensearchVersionRanges = new ArrayList<>(); + if (opensearchVersionString != null) { + opensearchVersionRanges.add(SemverRange.fromString(opensearchVersionString)); + } else { + Map<String, String> dependenciesMap; + try ( + final JsonXContentParser parser = new JsonXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + jsonFactory.createParser(dependenciesValue) + ) + ) { + dependenciesMap = parser.mapStrings(); + } + if (dependenciesMap.size() != 1) { + throw new IllegalArgumentException( + "Exactly one dependency is allowed to be specified in plugin descriptor properties: " + dependenciesMap + ); + } + if (dependenciesMap.keySet().stream().noneMatch(s -> s.equals("opensearch"))) { + throw new IllegalArgumentException("Only opensearch is allowed to be specified as a plugin dependency: " + dependenciesMap); + } + String[] ranges = dependenciesMap.get("opensearch").split(","); + if (ranges.length != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + opensearchVersionRanges.add(SemverRange.fromString(ranges[0].trim())); } - final Version opensearchVersion = Version.fromString(opensearchVersionString); + final String javaVersionString = propsMap.remove("java.version"); if (javaVersionString == null) { throw new IllegalArgumentException("property [java.version] is missing for plugin [" + name + "]"); @@ -273,7 +367,7 @@ public static PluginInfo readFromProperties(final Path path) throws IOException name, description, version, - opensearchVersion, + opensearchVersionRanges, javaVersionString, classname, customFolderName, @@ -337,12 +431,26 @@ public String getVersion() { } /** - * The version of OpenSearch the plugin was built for. + * The list of OpenSearch version ranges the plugin is compatible with. * - * @return an OpenSearch version + * @return a list of OpenSearch version ranges */ - public Version getOpenSearchVersion() { - return opensearchVersion; + public List<SemverRange> getOpenSearchVersionRanges() { + return opensearchVersionRanges; + }
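For reference, the readFromProperties logic above accepts exactly one of two mutually exclusive declarations in plugin-descriptor.properties; a hedged sketch of both variants (the version and range values are illustrative, and unquoted field names are accepted because of the JsonFactory configuration):

# Variant 1: pin to an exact OpenSearch version (existing behavior)
opensearch.version=2.13.0

# Variant 2: declare a single semver range under "dependencies" (new behavior);
# specifying both properties at once is rejected with an IllegalArgumentException.
dependencies={ opensearch: "~2.13.0" }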
+ + /** + * Pretty print the semver ranges and return the string. + * @return semver ranges string + */ + public String getOpenSearchVersionRangesString() { + if (opensearchVersionRanges == null || opensearchVersionRanges.isEmpty()) { + throw new IllegalStateException("OpenSearch version ranges list cannot be null or empty"); + } + if (opensearchVersionRanges.size() == 1) { + return opensearchVersionRanges.get(0).toString(); + } + return opensearchVersionRanges.stream().map(Object::toString).collect(Collectors.joining(",", "[", "]")); } /** @@ -378,7 +486,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field("name", name); builder.field("version", version); - builder.field("opensearch_version", opensearchVersion); + builder.field("opensearch_version", getOpenSearchVersionRangesString()); builder.field("java_version", javaVersion); builder.field("description", description); builder.field("classname", classname); @@ -432,7 +540,7 @@ public String toString(String prefix) { .append("\n") .append(prefix) .append("OpenSearch Version: ") - .append(opensearchVersion) + .append(getOpenSearchVersionRangesString()) .append("\n") .append(prefix) .append("Java Version: ") diff --git a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java index e7d92016d4082..1bf8642d1112f 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java +++ b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java @@ -135,6 +135,7 @@ static String formatPermission(Permission permission) { /** * Parses plugin policy into a set of permissions. Each permission is formatted for output to users. */ + @SuppressWarnings("removal") public static Set<String> parsePermissions(Path file, Path tmpDir) throws IOException { // create a zero byte file for "comparison" // this is necessary because the default policy impl automatically grants two permissions: diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index cc9cc5b5b5fbf..a6eefd2f4fd17 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -52,6 +52,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.service.ReportingService; import org.opensearch.index.IndexModule; +import org.opensearch.semver.SemverRange; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.transport.TransportSettings; @@ -387,12 +388,12 @@ public static List<Path> findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current OpenSearch installation.
*/ static void verifyCompatibility(PluginInfo info) { - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!isPluginVersionCompatible(info, Version.CURRENT)) { throw new IllegalArgumentException( "Plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getOpenSearchVersion() + + info.getOpenSearchVersionRangesString() + " but version " + Version.CURRENT + " is running" @@ -401,6 +402,16 @@ static void verifyCompatibility(PluginInfo info) { JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + public static boolean isPluginVersionCompatible(final PluginInfo pluginInfo, final Version coreVersion) { + // Core version must satisfy the semver range in plugin info + for (SemverRange range : pluginInfo.getOpenSearchVersionRanges()) { + if (!range.isSatisfiedBy(coreVersion)) { + return false; + } + } + return true; + } + static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { /* * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the @@ -682,6 +693,7 @@ static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Se } } + @SuppressWarnings("removal") private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) { String name = bundle.plugin.getName();
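A short sketch of how the new range check behaves; the plugin name, classname, and version literals are illustrative, and it assumes the tilde notation of the new semver module accepts any patch release within the same minor:

import org.opensearch.Version;
import org.opensearch.plugins.PluginInfo;
import org.opensearch.plugins.PluginsService;
import org.opensearch.semver.SemverRange;

import java.util.List;

// Hedged sketch: a plugin declaring the range "~2.13.0" should be accepted by
// any 2.13.x core but rejected by 2.14.0.
public class PluginCompatibilityExample {
    public static void main(String[] args) {
        PluginInfo info = new PluginInfo(
            "example-plugin",            // plugin name (illustrative)
            "An example plugin",         // description
            "1.0.0",                     // plugin version
            List.of(SemverRange.fromString("~2.13.0")),
            "11",                        // java version
            "org.example.ExamplePlugin", // classname (illustrative)
            null,                        // no custom folder name
            List.of(),                   // no extended plugins
            false                        // no native controller
        );
        // Every declared range must be satisfied by the running core version.
        System.out.println(PluginsService.isPluginVersionCompatible(info, Version.fromString("2.13.1"))); // true
        System.out.println(PluginsService.isPluginVersionCompatible(info, Version.fromString("2.14.0"))); // false
    }
}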
diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java index adca6992833bd..5b842ff0d3399 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java @@ -10,11 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.node.ResourceUsageCollectorService; import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; @@ -26,6 +28,7 @@ import java.util.concurrent.ConcurrentMap; import static org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER; +import static org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER; /** * Admission control Service that bootstraps and manages all the Admission Controllers in OpenSearch. @@ -58,15 +61,18 @@ public AdmissionControlService( this.clusterService = clusterService; this.settings = settings; this.resourceUsageCollectorService = resourceUsageCollectorService; - this.initialise(); + this.initialize(); } /** * Initialise and Register all the admissionControllers */ - private void initialise() { + private void initialize() { // Initialise different type of admission controllers registerAdmissionController(CPU_BASED_ADMISSION_CONTROLLER); + if (Constants.LINUX) { + registerAdmissionController(IO_BASED_ADMISSION_CONTROLLER); + } } /** @@ -101,6 +107,13 @@ private AdmissionController controllerFactory(String admissionControllerName) { this.clusterService, this.settings ); + case IO_BASED_ADMISSION_CONTROLLER: + return new IoBasedAdmissionController( + admissionControllerName, + this.resourceUsageCollectorService, + this.clusterService, + this.settings + ); default: throw new IllegalArgumentException("Not Supported AdmissionController : " + admissionControllerName); } @@ -128,7 +141,7 @@ public AdmissionController getAdmissionController(String controllerName) { */ public AdmissionControlStats stats() { List<AdmissionControllerStats> statsList = new ArrayList<>(); - if (this.admissionControllers.size() > 0) { + if (!this.admissionControllers.isEmpty()) { this.admissionControllers.forEach((controllerName, admissionController) -> { AdmissionControllerStats admissionControllerStats = new AdmissionControllerStats(admissionController); statsList.add(admissionControllerStats); diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java index 2246ce34dd399..f5bb5fa660e7f 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java @@ -24,7 +24,6 @@ * and admission control can be applied if configured limit has been reached */ public abstract class AdmissionController { - private final String admissionControllerName; final ResourceUsageCollectorService resourceUsageCollectorService; public final Map<String, AtomicLong> rejectionCountMap; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java new file mode 100644 index 0000000000000..ad6cc3ff378f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; + +import java.util.Locale; +import java.util.Optional; + +/** + * Class for IO Based Admission Controller in OpenSearch, which aims to provide IO utilisation admission control. + * It provides methods to apply admission control if configured limit has been reached + */ +public class IoBasedAdmissionController extends AdmissionController { + public static final String IO_BASED_ADMISSION_CONTROLLER = "global_io_usage"; + private static final Logger LOGGER = LogManager.getLogger(IoBasedAdmissionController.class); + public IoBasedAdmissionControllerSettings settings; + + /** + * @param admissionControllerName name of the admissionController + * @param resourceUsageCollectorService instance used to get resource usage stats of the node + * @param clusterService instance of the clusterService + */ + public IoBasedAdmissionController( + String admissionControllerName, + ResourceUsageCollectorService resourceUsageCollectorService, + ClusterService clusterService, + Settings settings + ) { + super(admissionControllerName, resourceUsageCollectorService, clusterService); + this.settings = new IoBasedAdmissionControllerSettings(clusterService.getClusterSettings(), settings); + } + + /** + * Apply admission control based on the resource usage for an action + * + * @param action is the transport action + * @param admissionControlActionType type of admissionControlActionType + */ + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { + this.applyForTransportLayer(action, admissionControlActionType); + } + } + + /** + * Apply transport layer admission control if configured limit has been reached + */ + private void applyForTransportLayer(String actionName, AdmissionControlActionType admissionControlActionType) { + if (isLimitsBreached(actionName, admissionControlActionType)) { + this.addRejectionCount(admissionControlActionType.getType(), 1); + if (this.isAdmissionControllerEnforced(this.settings.getTransportLayerAdmissionControllerMode())) { + throw new OpenSearchRejectedExecutionException( + String.format( + Locale.ROOT, + "Io usage admission controller rejected the request for action [%s] as IO limit reached", + admissionControlActionType.name() + ) + ); + } + } + } + + /** + * Check if the configured resource usage limits are breached for the action + */ + private boolean isLimitsBreached(String actionName, AdmissionControlActionType admissionControlActionType) { + // check if cluster state is ready + if (clusterService.state() != null && clusterService.state().nodes() != null) { + long ioUsageThreshold = this.getIoRejectionThreshold(admissionControlActionType); + Optional<NodeResourceUsageStats> nodePerformanceStatistics = this.resourceUsageCollectorService.getNodeStatistics( + 
this.clusterService.state().nodes().getLocalNodeId() + ); + if (nodePerformanceStatistics.isPresent()) { + double ioUsage = nodePerformanceStatistics.get().getIoUsageStats().getIoUtilisationPercent(); + if (ioUsage >= ioUsageThreshold) { + LOGGER.warn( + "IoBasedAdmissionController limit reached as the current IO " + + "usage [{}] exceeds the allowed limit [{}] for transport action [{}] in admissionControlMode [{}]", + ioUsage, + ioUsageThreshold, + actionName, + this.settings.getTransportLayerAdmissionControllerMode() + ); + return true; + } + } + } + return false; + } + + /** + * Get IO rejection threshold based on action type + */ + private long getIoRejectionThreshold(AdmissionControlActionType admissionControlActionType) { + switch (admissionControlActionType) { + case SEARCH: + return this.settings.getSearchIOUsageLimit(); + case INDEXING: + return this.settings.getIndexingIOUsageLimit(); + default: + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Admission control not supported for AdmissionControlActionType: %s", + admissionControlActionType.getType() + ) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java new file mode 100644 index 0000000000000..e58ed28d21605 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +/** + * Settings related to IO based admission controller. + * @opensearch.internal + */ +public class IoBasedAdmissionControllerSettings { + + /** + * Default parameters for the IoBasedAdmissionControllerSettings + */ + public static class Defaults { + public static final long IO_USAGE_LIMIT = 95; + } + + private AdmissionControlMode transportLayerMode; + private Long searchIOUsageLimit; + private Long indexingIOUsageLimit; + + /** + * Feature level setting to operate in shadow mode or in enforced mode. If enforced mode is set, + * rejections will be performed; otherwise only rejection metrics will be populated. + */ + public static final Setting<AdmissionControlMode> IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.io_usage.mode_override", + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the IO limit for search requests; by default it uses the default IO usage limit + */ + public static final Setting<Long> SEARCH_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.search.io_usage.limit", + Defaults.IO_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the IO limit for indexing requests; by default it uses the default IO usage limit + */ + public static final Setting<Long> INDEXING_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.indexing.io_usage.limit", + Defaults.IO_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public IoBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayerMode = IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); + this.searchIOUsageLimit = SEARCH_IO_USAGE_LIMIT.get(settings); + this.indexingIOUsageLimit = INDEXING_IO_USAGE_LIMIT.get(settings); + clusterSettings.addSettingsUpdateConsumer(INDEXING_IO_USAGE_LIMIT, this::setIndexingIOUsageLimit); + clusterSettings.addSettingsUpdateConsumer(SEARCH_IO_USAGE_LIMIT, this::setSearchIOUsageLimit); + } + + public void setIndexingIOUsageLimit(Long indexingIOUsageLimit) { + this.indexingIOUsageLimit = indexingIOUsageLimit; + } + + public void setSearchIOUsageLimit(Long searchIOUsageLimit) { + this.searchIOUsageLimit = searchIOUsageLimit; + } + + public AdmissionControlMode getTransportLayerAdmissionControllerMode() { + return transportLayerMode; + } + + public void setTransportLayerMode(AdmissionControlMode transportLayerMode) { + this.transportLayerMode = transportLayerMode; + } + + public Long getIndexingIOUsageLimit() { + return indexingIOUsageLimit; + } + + public Long getSearchIOUsageLimit() { + return searchIOUsageLimit; + } +}
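A hedged sketch of how a node could opt into the new controller (values are illustrative; the setting keys come from the definitions above, "enforced" assumes the corresponding AdmissionControlMode name, and the controller only registers on Linux per the Constants.LINUX check earlier):

import org.opensearch.common.settings.Settings;

// Illustrative node settings: enforce IO-based admission control with
// limits tighter than the 95 percent default.
public class IoAdmissionControlSettingsExample {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
            .put("admission_control.transport.io_usage.mode_override", "enforced")
            .put("admission_control.search.io_usage.limit", 90)   // reject search work above 90% IO usage
            .put("admission_control.indexing.io_usage.limit", 85) // reject indexing work above 85% IO usage
            .build();
        System.out.println(nodeSettings);
        // All three settings are declared Dynamic, so they can also be updated
        // at runtime through the cluster settings API.
    }
}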
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java index cc4d3c006d84c..afb6e530b0eec 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java @@ -39,6 +39,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -71,6 +72,11 @@ public RepositoriesModule( metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) ); + factories.put( + ReloadableFsRepository.TYPE, + metadata -> new ReloadableFsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + for (RepositoryPlugin repoPlugin : repoPlugins) { Map<String, Repository.Factory> newRepoTypes = repoPlugin.getRepositories( env, diff --git
a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 8a2260e1f6d90..076173177feee 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -117,6 +117,7 @@ import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.snapshots.blobstore.SlicedInputStream; import org.opensearch.index.snapshots.blobstore.SnapshotFiles; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; @@ -236,6 +237,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.Deprecated ); + private static final Logger staticLogger = LogManager.getLogger(BlobStoreRepository.class); + /** * Setting to disable caching of the latest repository data. */ @@ -1098,6 +1101,79 @@ private void asyncCleanupUnlinkedShardLevelBlobs( } } + public static void remoteDirectoryCleanupAsync( + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, + ThreadPool threadpool, + String remoteStoreRepoForIndex, + String indexUUID, + ShardId shardId, + String threadPoolName + ) { + threadpool.executor(threadPoolName) + .execute( + new RemoteStoreShardCleanupTask( + () -> RemoteSegmentStoreDirectory.remoteDirectoryCleanup( + remoteDirectoryFactory, + remoteStoreRepoForIndex, + indexUUID, + shardId + ), + indexUUID, + shardId + ) + ); + } + + protected void releaseRemoteStoreLockAndCleanup( + String shardId, + String shallowSnapshotUUID, + BlobContainer shardContainer, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + if (remoteStoreLockManagerFactory == null) { + return; + } + + RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( + shardContainer, + shallowSnapshotUUID, + namedXContentRegistry + ); + String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); + String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); + // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while + // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during + // next delete operation for releasing this lock file + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); + remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); + logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); + if (!isIndexPresent(clusterService, indexUUID)) { + // Note: this is a temporary solution where snapshot deletion triggers remote store side cleanup if + // index is already deleted. this is the best effort at the moment since shard cleanup will still happen + // asynchronously using REMOTE_PURGE thread pool. if it fails, it could leave some stale files in remote + // directory. this issue could even happen in cases of shard level remote store data cleanup which also + // happens asynchronously. 
in the long term, we have plans to implement remote store GC poller mechanism which + // will take care of such stale data. + // related issue: https://github.com/opensearch-project/OpenSearch/issues/8469 + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ); + remoteDirectoryCleanupAsync( + remoteDirectoryFactory, + threadPool, + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), + ThreadPool.Names.REMOTE_PURGE + ); + } + }
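For orientation before the executeStaleShardDelete changes below, a hedged sketch of the UUID extraction the new extractShallowSnapshotUUID helper performs; the "shallow-snap-" literal is an assumption about SHALLOW_SNAPSHOT_PREFIX, whose definition is outside this diff:

import java.util.Optional;

// Sketch: strip the assumed prefix and the ".dat" suffix from a shallow
// snapshot marker blob name to recover the snapshot UUID.
public class ShallowSnapshotBlobNames {
    static Optional<String> extractUuid(String blobName) {
        final String prefix = "shallow-snap-"; // assumed value of SHALLOW_SNAPSHOT_PREFIX
        if (blobName.startsWith(prefix)) {
            return Optional.of(blobName.substring(prefix.length(), blobName.length() - ".dat".length()));
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(extractUuid("shallow-snap-2Y6QRPZuT_qDBEIRwTSLmw.dat")); // Optional[2Y6QRPZuT_qDBEIRwTSLmw]
        System.out.println(extractUuid("snap-2Y6QRPZuT_qDBEIRwTSLmw.dat"));         // Optional.empty
    }
}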
+ // When remoteStoreLockManagerFactory is non-null, while deleting the files, lock files are also released before deletion of respective // shallow-snap-UUID files. And if it is null, we just delete the stale shard blobs. private void executeStaleShardDelete( @@ -1109,56 +1185,34 @@ private void executeStaleShardDelete( if (filesToDelete != null) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { try { - if (remoteStoreLockManagerFactory != null) { - for (String fileToDelete : filesToDelete) { - if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { - String[] fileToDeletePath = fileToDelete.split("/"); - String indexId = fileToDeletePath[1]; - String shardId = fileToDeletePath[2]; - String shallowSnapBlob = fileToDeletePath[3]; - String snapshotUUID = shallowSnapBlob.substring( - SHALLOW_SNAPSHOT_PREFIX.length(), - shallowSnapBlob.length() - ".dat".length() - ); - BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); - RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = - REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( - shardContainer, - snapshotUUID, - namedXContentRegistry - ); - String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); - String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); - // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while - // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during - // next delete operation for releasing this lock file - RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( - remoteStoreRepoForIndex, - indexUUID, - shardId - ); - remoteStoreMetadataLockManager.release( - FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() + // filtering files for which remote store lock release and cleanup succeeded; + // remaining files for which it failed will be retried in the next snapshot delete run. + List<String> eligibleFilesToDelete = new ArrayList<>(); + for (String fileToDelete : filesToDelete) { + if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { + String[] fileToDeletePath = fileToDelete.split("/"); + String indexId = fileToDeletePath[1]; + String shardId = fileToDeletePath[2]; + String shallowSnapBlob = fileToDeletePath[3]; + String snapshotUUID = extractShallowSnapshotUUID(shallowSnapBlob).orElseThrow(); + BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); + try { + releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); + eligibleFilesToDelete.add(fileToDelete); + } catch (Exception e) { + logger.error( + "Failed to release lock or cleanup shard for indexID {}, shardID {} " + "and snapshot {}", + indexId, + shardId, + snapshotUUID, + e ); - if (!isIndexPresent(clusterService, indexUUID)) { - // this is a temporary solution where snapshot deletion triggers remote store side - // cleanup if index is already deleted. We will add a poller in future to take - // care of remote store side cleanup. - // see https://github.com/opensearch-project/OpenSearch/issues/8469 - new RemoteSegmentStoreDirectoryFactory( - remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool - ).newDirectory( - remoteStoreRepoForIndex, - indexUUID, - new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardId)) - ).close(); - } } + } else { + eligibleFilesToDelete.add(fileToDelete); + } } // Deleting the shard blobs - deleteFromContainer(blobContainer(), filesToDelete); + deleteFromContainer(blobContainer(), eligibleFilesToDelete); l.onResponse(null); } catch (Exception e) { logger.warn( @@ -1586,45 +1640,17 @@ private void executeOneStaleIndexDelete( try { logger.debug("[{}] Found stale index [{}].
Cleaning it up", metadata.name(), indexSnId); if (remoteStoreLockManagerFactory != null) { - Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); - if (!shardBlobs.isEmpty()) { - for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { - Map<String, BlobMetadata> shardLevelBlobs = shardBlob.getValue().listBlobs(); - for (Map.Entry<String, BlobMetadata> shardLevelBlob : shardLevelBlobs.entrySet()) { - String blob = shardLevelBlob.getKey(); - String snapshotUUID = blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()); - if (blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) && blob.endsWith(".dat")) { - RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = - REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( - shardBlob.getValue(), - snapshotUUID, - namedXContentRegistry - ); - String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); - String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); - // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure - // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file - // and that would be used during next delete operation for releasing this stale lock file - RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()); - remoteStoreMetadataLockManager.release( - FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() - ); - if (!isIndexPresent(clusterService, indexUUID)) { - // this is a temporary solution where snapshot deletion triggers remote store side - // cleanup if index is already deleted. We will add a poller in future to take - // care of remote store side cleanup. - // see https://github.com/opensearch-project/OpenSearch/issues/8469 - new RemoteSegmentStoreDirectoryFactory( - remoteStoreLockManagerFactory.getRepositoriesService(), - threadPool - ).newDirectory( - remoteStoreRepoForIndex, - indexUUID, - new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardBlob.getKey())) - ).close(); - } - } + final Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); + for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { + for (String blob : shardBlob.getValue().listBlobs().keySet()) { + final Optional<String> snapshotUUID = extractShallowSnapshotUUID(blob); + if (snapshotUUID.isPresent()) { + releaseRemoteStoreLockAndCleanup( + shardBlob.getKey(), + snapshotUUID.get(), + shardBlob.getValue(), + remoteStoreLockManagerFactory + ); } } } @@ -3362,12 +3388,8 @@ private static List<String> unusedBlobs( blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) ) == false) || (remoteStoreLockManagerFactory != null - ? 
(blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) - && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) - ) == false) - : false) + && extractShallowSnapshotUUID(blob).map(snapshotUUID -> !survivingSnapshotUUIDs.contains(snapshotUUID)) + .orElse(false)) || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) || FsBlobContainer.isTempBlobName(blob) ) @@ -3509,6 +3531,13 @@ private static void failStoreIfCorrupted(Store store, Exception e) { } } + private static Optional<String> extractShallowSnapshotUUID(String blobName) { + if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX)) { + return Optional.of(blobName.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blobName.length() - ".dat".length())); + } + return Optional.empty(); + } + /** * The result of removing a snapshot from a shard folder in the repository. */ diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java b/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java new file mode 100644 index 0000000000000..b6b8957d1bd19 --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.blobstore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.core.index.shard.ShardId; + +import java.util.Set; + +import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; + +/** + A Runnable wrapper to make sure that for a given shard only one cleanup task runs at a time. + */ +public class RemoteStoreShardCleanupTask implements Runnable { + private final Runnable task; + private final String shardIdentifier; + final static Set<String> ongoingRemoteDirectoryCleanups = newConcurrentSet(); + private static final Logger staticLogger = LogManager.getLogger(RemoteStoreShardCleanupTask.class); + + public RemoteStoreShardCleanupTask(Runnable task, String indexUUID, ShardId shardId) { + this.task = task; + this.shardIdentifier = indexShardIdentifier(indexUUID, shardId); + } + + private static String indexShardIdentifier(String indexUUID, ShardId shardId) { + return String.join("/", indexUUID, String.valueOf(shardId.id())); + } + + @Override + public void run() { + // If there is already a same task ongoing for a shard, we need to skip the new task to avoid multiple + // concurrent cleanup of same shard. 
+ if (ongoingRemoteDirectoryCleanups.add(shardIdentifier)) { + try { + task.run(); + } finally { + ongoingRemoteDirectoryCleanups.remove(shardIdentifier); + } + } else { + staticLogger.warn("a cleanup task for shard {} is already in progress, skipping this task", shardIdentifier); + } + } +}
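The single-flight guard used by RemoteStoreShardCleanupTask can be restated in isolation; a minimal sketch with plain JDK types (the production class uses ConcurrentCollections.newConcurrentSet() instead):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// At most one task per key runs at a time; the key is removed in a finally
// block so a later task for the same key can run again.
public class SingleFlightExample {
    private static final Set<String> inFlight = ConcurrentHashMap.newKeySet();

    static void runExclusive(String key, Runnable task) {
        if (inFlight.add(key)) { // add() is atomic and returns false if already present
            try {
                task.run();
            } finally {
                inFlight.remove(key);
            }
        } // else: skip, a task for this key is already running
    }

    public static void main(String[] args) {
        runExclusive("indexUUID/0", () -> System.out.println("cleaning shard 0"));
    }
}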
diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java index c06c805a39396..e8020a432a58a 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java @@ -8,18 +8,52 @@ package org.opensearch.repositories.fs; +import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Random; + /** - * Extension of {@link FsRepository} that can be reloaded inplace + * Extension of {@link FsRepository} that can be reloaded in place; it also supports failing operations and slowing down writes * * @opensearch.internal */ public class ReloadableFsRepository extends FsRepository { + public static final String TYPE = "reloadable-fs"; + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public static final Setting<Integer> REPOSITORIES_FAILRATE_SETTING = Setting.intSetting( + "repositories.fail.rate", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + + public static final Setting<Integer> REPOSITORIES_SLOWDOWN_SETTING = Setting.intSetting( + "repositories.slowdown", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + /** * Constructs a shared file system repository that is reloadable in-place. */ @@ -31,6 +65,11 @@ public ReloadableFsRepository( RecoverySettings recoverySettings ) { super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + fail = new FailSwitch(); + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown = new SlowDownWriteSwitch(); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + readRepositoryMetadata(); } @Override @@ -40,12 +79,124 @@ public boolean isReloadable() { @Override public void reload(RepositoryMetadata repositoryMetadata) { - if (isReloadable() == false) { - return; - } - super.reload(repositoryMetadata); + readRepositoryMetadata(); validateLocation(); readMetadata(); } + + private void readRepositoryMetadata() { + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + } + + protected BlobStore createBlobStore() throws Exception { + final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); + final Path locationFile = environment.resolveRepoFile(location); + return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail, slowDown); + } + + // A random integer from min-max (inclusive). + public static int randomIntBetween(int min, int max) { + Random random = Randomness.get(); + return random.nextInt(max - min + 1) + min; + } + + static class FailSwitch { + private volatile int failRate; + private volatile boolean onceFailedFailAlways = false; + + public boolean fail() { + final int rnd = randomIntBetween(1, 100); + boolean fail = rnd <= failRate; + if (fail && onceFailedFailAlways) { + failAlways(); + } + return fail; + } + + public void failAlways() { + failRate = 100; + } + + public void failRate(int rate) { + failRate = rate; + } + + public void onceFailedFailAlways() { + onceFailedFailAlways = true; + } + } + + static class SlowDownWriteSwitch { + private volatile int sleepSeconds; + + public void setSleepSeconds(int sleepSeconds) { + this.sleepSeconds = sleepSeconds; + } + + public int getSleepSeconds() { + return sleepSeconds; + } + } + + private static class ThrowingBlobStore extends FsBlobStore { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, FailSwitch fail, SlowDownWriteSwitch slowDown) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail, slowDown); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + } + + private static class ThrowingBlobContainer extends FsBlobContainer { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, FailSwitch fail, SlowDownWriteSwitch slowDown) { + super(blobStore, blobPath, path); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) + throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); + } + + private void checkFailRateAndSleep(String blobName) throws
IOException { + if (fail.fail() && blobName.contains(".dat") == false) { + throw new IOException("blob container throwing error"); + } + if (slowDown.getSleepSeconds() > 0) { + try { + Thread.sleep(slowDown.getSleepSeconds() * 1000L); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + } } diff --git a/server/src/main/java/org/opensearch/rest/MethodHandlers.java b/server/src/main/java/org/opensearch/rest/MethodHandlers.java index 8c29bf2e66036..30221705e1aba 100644 --- a/server/src/main/java/org/opensearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/opensearch/rest/MethodHandlers.java @@ -6,82 +6,24 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.rest; -import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; -import java.util.HashMap; -import java.util.Map; import java.util.Set; /** - * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. - * - * @opensearch.api + * A collection of REST method handlers. */ -final class MethodHandlers { - - private final String path; - private final Map<RestRequest.Method, RestHandler> methodHandlers; - - MethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { - this.path = path; - this.methodHandlers = new HashMap<>(methods.length); - for (RestRequest.Method method : methods) { - methodHandlers.put(method, handler); - } - } - - /** - * Add a handler for an additional array of methods. Note that {@code MethodHandlers} - * does not allow replacing the handler for an already existing method. - */ - MethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { - for (RestRequest.Method method : methods) { - RestHandler existing = methodHandlers.putIfAbsent(method, handler); - if (existing != null) { - throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); - } - } - return this; - } - +@PublicApi(since = "2.12.0") +public interface MethodHandlers { /** - * Returns the handler for the given method or {@code null} if none exists. + * Return a set of all valid HTTP methods for the particular path. 
diff --git a/server/src/main/java/org/opensearch/rest/MethodHandlers.java b/server/src/main/java/org/opensearch/rest/MethodHandlers.java index 8c29bf2e66036..30221705e1aba 100644 --- a/server/src/main/java/org/opensearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/opensearch/rest/MethodHandlers.java @@ -6,82 +6,24 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.rest; -import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; -import java.util.HashMap; -import java.util.Map; import java.util.Set; /** - * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. - * - * @opensearch.api + * A collection of REST method handlers. */ -final class MethodHandlers { - - private final String path; - private final Map<RestRequest.Method, RestHandler> methodHandlers; - - MethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { - this.path = path; - this.methodHandlers = new HashMap<>(methods.length); - for (RestRequest.Method method : methods) { - methodHandlers.put(method, handler); - } - } - - /** - * Add a handler for an additional array of methods. Note that {@code MethodHandlers} - * does not allow replacing the handler for an already existing method. - */ - MethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { - for (RestRequest.Method method : methods) { - RestHandler existing = methodHandlers.putIfAbsent(method, handler); - if (existing != null) { - throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); - } - } - return this; - } - +@PublicApi(since = "2.12.0") +public interface MethodHandlers { /** - * Returns the handler for the given method or {@code null} if none exists. + * Return a set of all valid HTTP methods for the particular path. */ - @Nullable - RestHandler getHandler(RestRequest.Method method) { - return methodHandlers.get(method); - } + Set<RestRequest.Method> getValidMethods(); /** - * Return a set of all valid HTTP methods for the particular path + * Returns the relative HTTP path of the set of method handlers. */ - Set<RestRequest.Method> getValidMethods() { - return methodHandlers.keySet(); - } + String getPath(); } diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index cc48b59699a17..95abb9b3daeca 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -65,6 +65,7 @@ import java.io.IOException; import java.io.InputStream; import java.net.URI; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -107,7 +108,7 @@ public class RestController implements HttpServerTransport.Dispatcher { } } - private final PathTrie<MethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); + private final PathTrie<RestMethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); private final UnaryOperator<RestHandler> handlerWrapper; @@ -144,6 +145,16 @@ public RestController( ); } + /** + * Returns an iterator over registered REST method handlers. + * @return {@link Iterator} of {@link MethodHandlers} + */ + public Iterator<MethodHandlers> getAllHandlers() { + List<MethodHandlers> methodHandlers = new ArrayList<>(); + handlers.retrieveAll().forEachRemaining(methodHandlers::add); + return methodHandlers.iterator(); + } + /** * Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request. * @@ -221,7 +232,7 @@ protected void registerHandler(RestRequest.Method method, String path, RestHandl private void registerHandlerNoWrap(RestRequest.Method method, String path, RestHandler maybeWrappedHandler) { handlers.insertOrUpdate( path, - new MethodHandlers(path, maybeWrappedHandler, method), + new RestMethodHandlers(path, maybeWrappedHandler, method), (mHandlers, newMHandler) -> mHandlers.addMethods(maybeWrappedHandler, method) ); } @@ -392,10 +403,10 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel // Resolves the HTTP method and fails if the method is invalid requestMethod = request.method(); // Loop through all possible handlers, attempting to dispatch the request - Iterator<MethodHandlers> allHandlers = getAllHandlers(request.params(), rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(request.params(), rawPath); while (allHandlers.hasNext()) { final RestHandler handler; - final MethodHandlers handlers = allHandlers.next(); + final RestMethodHandlers handlers = allHandlers.next(); if (handlers == null) { handler = null; } else { @@ -423,7 +434,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel handleBadRequest(uri, requestMethod, channel); } - Iterator<MethodHandlers> getAllHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { + Iterator<RestMethodHandlers> getAllRestMethodHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { final Supplier<Map<String, String>> paramsSupplier; if (requestParamsRef == null) { paramsSupplier = () -> null; @@ -561,7 +572,7 @@ private boolean handleAuthenticateUser(final RestRequest request, final RestChan */ private Set<RestRequest.Method> getValidHandlerMethodSet(String
rawPath) { Set<RestRequest.Method> validMethods = new HashSet<>(); - Iterator<MethodHandlers> allHandlers = getAllHandlers(null, rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(null, rawPath); while (allHandlers.hasNext()) { final MethodHandlers methodHandlers = allHandlers.next(); if (methodHandlers != null) { diff --git a/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java new file mode 100644 index 0000000000000..a430d8ace447c --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.rest; + +import org.opensearch.common.Nullable; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. + */ +final class RestMethodHandlers implements MethodHandlers { + + private final String path; + private final Map<RestRequest.Method, RestHandler> methodHandlers; + + RestMethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { + this.path = path; + this.methodHandlers = new HashMap<>(methods.length); + for (RestRequest.Method method : methods) { + methodHandlers.put(method, handler); + } + } + + /** + * Add a handler for an additional array of methods. Note that {@code MethodHandlers} + * does not allow replacing the handler for an already existing method. + */ + public RestMethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { + for (RestRequest.Method method : methods) { + RestHandler existing = methodHandlers.putIfAbsent(method, handler); + if (existing != null) { + throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); + } + } + return this; + } + + /** + * Returns the handler for the given method or {@code null} if none exists. + */ + @Nullable + public RestHandler getHandler(RestRequest.Method method) { + return methodHandlers.get(method); + } + + /** + * Return a set of all valid HTTP methods for the particular path. + */ + public Set<RestRequest.Method> getValidMethods() { + return methodHandlers.keySet(); + } + + /** + * Returns the relative HTTP path of the set of method handlers. 
+ */ + public String getPath() { + return path; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java index 28edba4db387d..138f9fdf5c813 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -53,7 +53,7 @@ public final class RestRemoteClusterInfoAction extends BaseRestHandler { @Override public List<Route> routes() { - return singletonList(new Route(GET, "_remote/info")); + return singletonList(new Route(GET, "/_remote/info")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java index 06f1d5f46f90b..f3e66bd20cd86 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java @@ -76,6 +76,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); + mergeRequest.primaryOnly(request.paramAsBoolean("primary_only", mergeRequest.primaryOnly())); if (mergeRequest.onlyExpungeDeletes() && mergeRequest.maxNumSegments() != ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) { deprecationLogger.deprecate( "force_merge_expunge_deletes_and_max_num_segments_deprecation", diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java index 8fdf000139d89..0d805f5f3bfb8 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java @@ -66,10 +66,10 @@ public class RestPutMappingAction extends BaseRestHandler { public List<Route> routes() { return unmodifiableList( asList( - new Route(POST, "/{index}/_mapping/"), - new Route(PUT, "/{index}/_mapping/"), - new Route(POST, "/{index}/_mappings/"), - new Route(PUT, "/{index}/_mappings/") + new Route(POST, "/{index}/_mapping"), + new Route(PUT, "/{index}/_mapping"), + new Route(POST, "/{index}/_mappings"), + new Route(PUT, "/{index}/_mappings") ) ); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java new file mode 100644 index 0000000000000..47be439a97fc4 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java @@ -0,0 +1,240 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.rest.action.RestStatusToXContentListener; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.rest.action.search.RestSearchAction; + +import java.io.IOException; +import java.util.List; +import java.util.function.IntConsumer; + +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.POST; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** All rest handlers for view actions */ +@ExperimentalApi +public class RestViewAction { + + public static final String VIEW_NAME = "view_name"; + public static final String VIEW_NAME_PARAMETER = "{" + VIEW_NAME + "}"; + + /** Handler for create view */ + @ExperimentalApi + public static class CreateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views").method(POST).uniqueName(CreateViewAction.NAME).build()); + } + + @Override + public String getName() { + return CreateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request createViewAction = CreateViewAction.Request.fromXContent(parser); + + final ValidationException validationResult = createViewAction.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().createView(createViewAction, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for delete view */ + @ExperimentalApi + public static class DeleteViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(DELETE).uniqueName(DeleteViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return DeleteViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final DeleteViewAction.Request deleteRequest = new DeleteViewAction.Request(viewId); + + final ValidationException validationResult = deleteRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().deleteView(deleteRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for update view */ + 
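Every view handler in this file repeats the same shape: build a typed request, call validate(), and throw the resulting ValidationException when it is non-null, so malformed input never reaches the client dispatch. A self-contained sketch of that validate-or-throw pattern with a toy request class (not the real ActionRequest hierarchy):

```java
import java.util.ArrayList;
import java.util.List;

// Toy request type mirroring the validate-or-throw convention of the view handlers.
final class CreateViewRequestSketch {
    final String name;

    CreateViewRequestSketch(String name) {
        this.name = name;
    }

    // Returns null when valid, otherwise an exception carrying all validation errors.
    IllegalArgumentException validate() {
        List<String> errors = new ArrayList<>();
        if (name == null || name.isEmpty()) {
            errors.add("view name cannot be empty");
        }
        return errors.isEmpty() ? null : new IllegalArgumentException(String.join("; ", errors));
    }

    public static void main(String[] args) {
        CreateViewRequestSketch request = new CreateViewRequestSketch("");
        IllegalArgumentException validationResult = request.validate();
        if (validationResult != null) {
            System.out.println("rejected: " + validationResult.getMessage()); // never reaches dispatch
            return;
        }
        System.out.println("dispatch " + request.name); // the happy path hands off to the client
    }
}
```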
@ExperimentalApi + public static class UpdateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(PUT).uniqueName(UpdateViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return UpdateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request updateRequest = UpdateViewAction.Request.fromXContent(parser, viewId); + + final ValidationException validationResult = updateRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().updateView(updateRequest, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for get view */ + @ExperimentalApi + public static class GetViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(GET).uniqueName(GetViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return GetViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final GetViewAction.Request getRequest = new GetViewAction.Request(viewId); + + final ValidationException validationResult = getRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().getView(getRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for list view names */ + @ExperimentalApi + public static class ListViewNamesHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views/").method(GET).uniqueName(ListViewNamesAction.NAME).build()); + } + + @Override + public String getName() { + return ListViewNamesAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.listViewNames(new ListViewNamesAction.Request(), new RestToXContentListener<>(channel)); + } + } + + /** Handler for search view */ + @ExperimentalApi + public static class SearchViewHandler extends BaseRestHandler { + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(GET) + .uniqueName(SearchViewAction.NAME) + .build(), + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(POST) + .uniqueName(SearchViewAction.NAME) + .build() + ); + } + + @Override + public String getName() { + return SearchViewAction.NAME; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final SearchViewAction.Request viewSearchRequest = new SearchViewAction.Request(viewId, new SearchRequest()); + final IntConsumer setSize = size -> viewSearchRequest.source().size(size); + + request.withContentOrSourceParamParserOrNull( + parser -> RestSearchAction.parseSearchRequest( + viewSearchRequest, + request, +
parser, + client.getNamedWriteableRegistry(), + setSize + ) + ); + + final ValidationException validationResult = viewSearchRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> { + final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(SearchViewAction.INSTANCE, viewSearchRequest, new RestStatusToXContentListener<>(channel)); + }; + } + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 23cc1cb507072..9dc711f804144 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -54,7 +54,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -582,31 +581,29 @@ protected Table getTableWithHeader(final RestRequest request) { "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops" ); table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); + table.addCell( + "search.concurrent_query_current", + "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); - table.addCell( - "search.concurrent_query_time", - "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); + table.addCell( + "search.concurrent_query_time", + "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); - table.addCell( - "search.concurrent_query_total", - "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total query phase ops" - ); - table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total query phase ops"); + table.addCell( + "search.concurrent_query_total", + "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total query phase ops" + ); + table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total query phase ops"); - table.addCell( - "search.concurrent_avg_slice_count", - "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query 
concurrency" - ); - table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); - } + table.addCell( + "search.concurrent_avg_slice_count", + "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); table.addCell( "search.scroll_current", @@ -916,19 +913,17 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getQueryCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCount()); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); - table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); - table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); - } + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCurrent()); table.addCell(primaryStats.getSearch() == null ? 
null : primaryStats.getSearch().getTotal().getScrollCurrent()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index dd3e0ba836557..e11012a23fce7 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -47,7 +47,6 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; @@ -304,24 +303,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell( - "search.concurrent_query_time", - "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell( - "search.concurrent_query_total", - "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" - ); - table.addCell( - "search.concurrent_avg_slice_count", - "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" - ); - } + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( "search.scroll_time", @@ -548,12 +545,10 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); - table.addCell(searchStats == null ? 
null : searchStats.getTotal().getConcurrentQueryCount()); - table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); - } + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index d0d00e4c4596a..4cd10c6874e0a 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -44,7 +44,6 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.CommitStats; @@ -220,24 +219,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell( - "search.concurrent_query_current", - "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" - ); - table.addCell( - "search.concurrent_query_time", - "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" - ); - table.addCell( - "search.concurrent_query_total", - "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" - ); - table.addCell( - "search.concurrent_avg_slice_count", - "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" - ); - } + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll 
contexts"); table.addCell( "search.scroll_time", @@ -419,13 +416,11 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCount())); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); - table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); - } table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 080366e536da1..80dc34c4d5d68 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -86,10 +86,13 @@ public class RestSearchAction extends BaseRestHandler { */ public static final String TOTAL_HITS_AS_INT_PARAM = "rest_total_hits_as_int"; public static final String TYPED_KEYS_PARAM = "typed_keys"; + public static final String INCLUDE_NAMED_QUERIES_SCORE_PARAM = "include_named_queries_score"; private static final Set<String> RESPONSE_PARAMS; static { - final Set<String> responseParams = new HashSet<>(Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM)); + final Set<String> responseParams = new HashSet<>( + Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM, INCLUDE_NAMED_QUERIES_SCORE_PARAM) + ); RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); } @@ -209,6 +212,7 @@ public static void parseSearchRequest( searchRequest.pipeline(request.param("search_pipeline")); checkRestTotalHits(request, searchRequest); + request.paramAsBoolean(INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); if (searchRequest.pointInTimeBuilder() != null) { preparePointInTime(searchRequest, request, namedWriteableRegistry); @@ -286,6 +290,10 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); } + if (request.hasParam("include_named_queries_score")) { + searchSourceBuilder.includeNamedQueriesScores(request.paramAsBoolean("include_named_queries_score", false)); + } + if (request.hasParam("track_total_hits")) { if 
(Booleans.isBoolean(request.param("track_total_hits"))) { searchSourceBuilder.trackTotalHits(request.paramAsBoolean("track_total_hits", true)); diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 960b46d68977b..061aa2f6e5896 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -50,7 +50,6 @@ import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -149,6 +148,8 @@ final class DefaultSearchContext extends SearchContext { private SortAndFormats sort; private Float minimumScore; private boolean trackScores = false; // when sorting, track scores as well... + + private boolean includeNamedQueriesScore = false; private int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; private FieldDoc searchAfter; private CollapseContext collapse; @@ -636,6 +637,17 @@ public boolean trackScores() { return this.trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; @@ -950,9 +962,7 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * false: otherwise */ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - && (clusterService != null) - && (concurrentSearchExecutor != null)) { + if ((clusterService != null) && (concurrentSearchExecutor != null)) { return indexService.getIndexSettings() .getSettings() .getAsBoolean( diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index ca088203733c6..a99da674836f2 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -685,6 +685,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return this.value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } @@ -745,6 +750,11 @@ public boolean advanceExact(int parentDoc) throws IOException { public double doubleValue() throws IOException { return lastEmittedValue; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 10e65fca3afb5..6391353cfe5b1 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -64,19 +64,21 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceFieldMapper; import 
org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.fetch.subphase.highlight.HighlightField; import org.opensearch.search.lookup.SourceLookup; import org.opensearch.transport.RemoteClusterAware; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -120,7 +122,7 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do private SearchSortValues sortValues = SearchSortValues.EMPTY; - private String[] matchedQueries = Strings.EMPTY_ARRAY; + private Map<String, Float> matchedQueries = new HashMap<>(); private Explanation explanation; @@ -203,10 +205,20 @@ public SearchHit(StreamInput in) throws IOException { sortValues = new SearchSortValues(in); size = in.readVInt(); - if (size > 0) { - matchedQueries = new String[size]; + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + if (size > 0) { + Map<String, Float> tempMap = in.readMap(StreamInput::readString, StreamInput::readFloat); + matchedQueries = tempMap.entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .collect( + Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (oldValue, newValue) -> oldValue, LinkedHashMap::new) + ); + } + } else { + matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = in.readString(); + matchedQueries.put(in.readString(), Float.NaN); } } // we call the setter here because that also sets the local index parameter @@ -224,36 +236,6 @@ public SearchHit(StreamInput in) throws IOException { } } - private Map<String, DocumentField> readFields(StreamInput in) throws IOException { - Map<String, DocumentField> fields; - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = new DocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - fields = new HashMap<>(size); - for (int i = 0; i < size; i++) { - DocumentField field = new DocumentField(in); - fields.put(field.getName(), field); - } - fields = unmodifiableMap(fields); - } - return fields; - } - - private void writeFields(StreamOutput out, Map<String, DocumentField> fields) throws IOException { - if (fields == null) { - out.writeVInt(0); - } else { - out.writeVInt(fields.size()); - for (DocumentField field : fields.values()) { - field.writeTo(out); - } - } - } - private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); @Override @@ -286,11 +268,13 @@ public void writeTo(StreamOutput out) throws IOException { } sortValues.writeTo(out); - if (matchedQueries.length == 0) { - out.writeVInt(0); + out.writeVInt(matchedQueries.size()); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + if (!matchedQueries.isEmpty()) { + out.writeMap(matchedQueries, StreamOutput::writeString, StreamOutput::writeFloat); + } } else { - out.writeVInt(matchedQueries.length); - for (String matchedFilter : matchedQueries) { + for (String matchedFilter : matchedQueries.keySet()) { out.writeString(matchedFilter); } } @@ -458,11 +442,11 @@ public DocumentField field(String fieldName) { } /* - * Adds a new DocumentField to the map in case both parameters are not 
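The SearchHit read/write paths above keep two wire formats alive: peers on or after V_2_13_0 exchange a name-to-score map for matched queries, while older peers exchange only names and the scores default to Float.NaN. A simplified sketch of that version gate over a plain DataOutputStream, with a toy integer constant standing in for OpenSearch's Version type:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

public class MatchedQueriesWireSketch {
    static final int V_2_13_0 = 2_13_00; // toy version constant for illustration

    // Newer receivers get (name, score) pairs; older ones get names only.
    static byte[] write(Map<String, Float> matchedQueries, int receiverVersion) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(matchedQueries.size());
            for (Map.Entry<String, Float> entry : matchedQueries.entrySet()) {
                out.writeUTF(entry.getKey());
                if (receiverVersion >= V_2_13_0) {
                    out.writeFloat(entry.getValue());
                }
            }
        }
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        Map<String, Float> queries = new LinkedHashMap<>();
        queries.put("by_author", 1.3f);
        queries.put("by_date", Float.NaN);
        System.out.println(write(queries, V_2_13_0).length);     // names and scores
        System.out.println(write(queries, V_2_13_0 - 1).length); // names only, shorter payload
    }
}
```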
null. - * */ + * Adds a new DocumentField to the map in case both parameters are not null. + * */ public void setDocumentField(String fieldName, DocumentField field) { if (fieldName == null || field == null) return; - if (documentFields.size() == 0) this.documentFields = new HashMap<>(); + if (documentFields.isEmpty()) this.documentFields = new HashMap<>(); this.documentFields.put(fieldName, field); } @@ -475,7 +459,7 @@ public DocumentField removeDocumentField(String fieldName) { * were required to be loaded. */ public Map<String, DocumentField> getFields() { - if (metaFields.size() > 0 || documentFields.size() > 0) { + if (!metaFields.isEmpty() || !documentFields.isEmpty()) { final Map<String, DocumentField> fields = new HashMap<>(); fields.putAll(metaFields); fields.putAll(documentFields); @@ -560,14 +544,45 @@ public String getClusterAlias() { } public void matchedQueries(String[] matchedQueries) { - this.matchedQueries = matchedQueries; + if (matchedQueries != null) { + for (String query : matchedQueries) { + this.matchedQueries.put(query, Float.NaN); + } + } + } + + public void matchedQueriesWithScores(Map<String, Float> matchedQueries) { + if (matchedQueries != null) { + this.matchedQueries = matchedQueries; + } } /** * The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries. */ public String[] getMatchedQueries() { - return this.matchedQueries; + return matchedQueries == null ? new String[0] : matchedQueries.keySet().toArray(new String[0]); + } + + /** + * Returns the score of the provided named query if it matches. + * <p> + * If the 'include_named_queries_score' is not set, this method will return {@link Float#NaN} + * for each named query instead of a numerical score. + * </p> + * + * @param name The name of the query to retrieve the score for. + * @return The score of the named query, or {@link Float#NaN} if 'include_named_queries_score' is not set. + */ + public Float getMatchedQueryScore(String name) { + return getMatchedQueriesAndScores().get(name); + } + + /** + * @return The map of the named queries that matched and their associated score. + */ + public Map<String, Float> getMatchedQueriesAndScores() { + return matchedQueries == null ? 
Collections.emptyMap() : matchedQueries; } /** @@ -654,7 +669,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t for (DocumentField field : metaFields.values()) { // ignore empty metadata fields - if (field.getValues().size() == 0) { + if (field.getValues().isEmpty()) { continue; } // _ignored is the only multi-valued meta field @@ -670,10 +685,10 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } if (documentFields.isEmpty() == false && // ignore fields all together if they are all empty - documentFields.values().stream().anyMatch(df -> df.getValues().size() > 0)) { + documentFields.values().stream().anyMatch(df -> !df.getValues().isEmpty())) { builder.startObject(Fields.FIELDS); for (DocumentField field : documentFields.values()) { - if (field.getValues().size() > 0) { + if (!field.getValues().isEmpty()) { field.toXContent(builder, params); } } @@ -687,12 +702,21 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t builder.endObject(); } sortValues.toXContent(builder, params); - if (matchedQueries.length > 0) { - builder.startArray(Fields.MATCHED_QUERIES); - for (String matchedFilter : matchedQueries) { - builder.value(matchedFilter); + if (!matchedQueries.isEmpty()) { + boolean includeMatchedQueriesScore = params.paramAsBoolean(RestSearchAction.INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); + if (includeMatchedQueriesScore) { + builder.startObject(Fields.MATCHED_QUERIES); + for (Map.Entry<String, Float> entry : matchedQueries.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + } else { + builder.startArray(Fields.MATCHED_QUERIES); + for (String matchedFilter : matchedQueries.keySet()) { + builder.value(matchedFilter); + } + builder.endArray(); } - builder.endArray(); } if (getExplanation() != null) { builder.field(Fields._EXPLANATION); @@ -797,7 +821,27 @@ public static void declareInnerHitsParseFields(ObjectParser<Map<String, Object>, (p, c) -> parseInnerHits(p), new ParseField(Fields.INNER_HITS) ); - parser.declareStringArray((map, list) -> map.put(Fields.MATCHED_QUERIES, list), new ParseField(Fields.MATCHED_QUERIES)); + parser.declareField((p, map, context) -> { + XContentParser.Token token = p.currentToken(); + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + if (token == XContentParser.Token.START_OBJECT) { + String fieldName = null; + while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = p.currentName(); + } else if (token.isValue()) { + matchedQueries.put(fieldName, p.floatValue()); + } + } + } else if (token == XContentParser.Token.START_ARRAY) { + while (p.nextToken() != XContentParser.Token.END_ARRAY) { + matchedQueries.put(p.text(), Float.NaN); + } + } else { + throw new IllegalStateException("expected object or array but got [" + token + "]"); + } + map.put(Fields.MATCHED_QUERIES, matchedQueries); + }, new ParseField(Fields.MATCHED_QUERIES), ObjectParser.ValueType.OBJECT_ARRAY); parser.declareField( (map, list) -> map.put(Fields.SORT, list), SearchSortValues::fromXContent, @@ -828,7 +872,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { assert shardId.getIndexName().equals(index); searchHit.shard(new SearchShardTarget(nodeId, shardId, clusterAlias, OriginalIndices.NONE)); } else { - // these fields get set anyways when setting the shard target, + // these fields get set anyway when setting the shard target, // but we set them 
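toInnerXContent now renders matched_queries in one of two shapes: a plain array of names by default, or an object of name/score pairs when include_named_queries_score is set. A sketch producing both shapes with plain string building (the real code goes through XContentBuilder):

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class MatchedQueriesJsonSketch {
    // Array form (default) vs object form (include_named_queries_score=true).
    static String render(Map<String, Float> matchedQueries, boolean includeScores) {
        if (includeScores) {
            return matchedQueries.entrySet()
                .stream()
                .map(e -> "\"" + e.getKey() + "\": " + e.getValue())
                .collect(Collectors.joining(", ", "\"matched_queries\": {", "}"));
        }
        return matchedQueries.keySet()
            .stream()
            .map(name -> "\"" + name + "\"")
            .collect(Collectors.joining(", ", "\"matched_queries\": [", "]"));
    }

    public static void main(String[] args) {
        Map<String, Float> queries = new LinkedHashMap<>();
        queries.put("by_author", 1.3f);
        queries.put("by_date", 0.5f);
        System.out.println(render(queries, false)); // "matched_queries": ["by_author", "by_date"]
        System.out.println(render(queries, true));  // "matched_queries": {"by_author": 1.3, "by_date": 0.5}
    }
}
```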
explicitly when we don't have enough info to rebuild the shard target searchHit.index = index; searchHit.clusterAlias = clusterAlias; @@ -842,10 +886,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null)); searchHit.explanation(get(Fields._EXPLANATION, values, null)); searchHit.setInnerHits(get(Fields.INNER_HITS, values, null)); - List<String> matchedQueries = get(Fields.MATCHED_QUERIES, values, null); - if (matchedQueries != null) { - searchHit.matchedQueries(matchedQueries.toArray(new String[0])); - } + searchHit.matchedQueriesWithScores(get(Fields.MATCHED_QUERIES, values, null)); return searchHit; } @@ -965,7 +1006,7 @@ public boolean equals(Object obj) { && Objects.equals(documentFields, other.documentFields) && Objects.equals(metaFields, other.metaFields) && Objects.equals(getHighlightFields(), other.getHighlightFields()) - && Arrays.equals(matchedQueries, other.matchedQueries) + && Objects.equals(getMatchedQueriesAndScores(), other.getMatchedQueriesAndScores()) && Objects.equals(explanation, other.explanation) && Objects.equals(shard, other.shard) && Objects.equals(innerHits, other.innerHits) @@ -985,7 +1026,7 @@ public int hashCode() { documentFields, metaFields, getHighlightFields(), - Arrays.hashCode(matchedQueries), + getMatchedQueriesAndScores(), explanation, shard, innerHits, diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 62d397de58187..88218896dceae 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -39,7 +39,6 @@ import org.opensearch.common.geo.ShapesAvailability; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ParseFieldRegistry; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -1279,7 +1278,7 @@ private SearchPlugin.ExecutorServiceProvider registerIndexSearcherExecutorProvid } } - if (provider == null && FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + if (provider == null) { provider = (ThreadPool threadPool) -> threadPool.executor(INDEX_SEARCHER); } return provider; diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 2c85fcbb25f35..62eb597e387e6 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -250,7 +250,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final Setting<Boolean> CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( "search.concurrent_segment_search.enabled", - true, + false, Property.Dynamic, Property.NodeScope ); @@ -1274,6 +1274,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } context.trackScores(source.trackScores()); + context.includeNamedQueriesScore(source.includeNamedQueriesScore()); if (source.trackTotalHitsUpTo() != null && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE && context.scrollContext() != null) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java index f377287d0b3bd..e587b7f169e5f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java @@ -8,9 +8,15 @@ package org.opensearch.search.aggregations.bucket; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PointRangeQuery; @@ -18,16 +24,15 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.util.NumericUtils; -import org.opensearch.common.CheckedFunction; import org.opensearch.common.Rounding; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.DateRangeIncludingNowQuery; -import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.composite.CompositeKey; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; import org.opensearch.search.aggregations.bucket.composite.RoundingValuesSource; +import org.opensearch.search.aggregations.bucket.histogram.LongBounds; import org.opensearch.search.internal.SearchContext; import java.io.IOException; @@ -37,7 +42,8 @@ import java.util.OptionalLong; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Supplier; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * Utility class to help rewrite aggregations into filters. 
@@ -55,6 +61,8 @@ public final class FastFilterRewriteHelper private FastFilterRewriteHelper() {} + private static final Logger logger = LogManager.getLogger(FastFilterRewriteHelper.class); + private static final int MAX_NUM_FILTER_BUCKETS = 1024; private static final Map<Class<?>, Function<Query, Query>> queryWrappers; @@ -80,13 +88,13 @@ private static Query unwrapIntoConcreteQuery(Query query) { } /** - * Finds the min and max bounds of field values for the shard + * Finds the global min and max bounds of the field for the shard across all segments + * + * @return null if the field is empty or not indexed */ - private static long[] getIndexBounds(final SearchContext context, final String fieldName) throws IOException { + private static long[] getShardBounds(final SearchContext context, final String fieldName) throws IOException { final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves(); long min = Long.MAX_VALUE, max = Long.MIN_VALUE; - // Since the query does not specify bounds for aggregation, we can - // build the global min/max from local min/max within each segment for (LeafReaderContext leaf : leaves) { final PointValues values = leaf.reader().getPointValues(fieldName); if (values != null) { @@ -95,51 +103,80 @@ private static long[] getIndexBounds(final SearchContext context, final String f } } - if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) return null; + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } + return new long[] { min, max }; + } + /** + * Finds the min and max bounds of the field for the segment + * + * @return null if the field is empty or not indexed + */ + private static long[] getSegmentBounds(final LeafReaderContext context, final String fieldName) throws IOException { + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + final PointValues values = context.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } return new long[] { min, max }; } /** - * This method also acts as a pre-condition check for the optimization, - * returns null if the optimization cannot be applied + * This method also acts as a pre-condition check for the optimization + * + * @return null if the processed query is not as expected */ - public static long[] getAggregationBounds(final SearchContext context, final String fieldName) throws IOException { + public static long[] getDateHistoAggBounds(final SearchContext context, final String fieldName) throws IOException { final Query cq = unwrapIntoConcreteQuery(context.query()); - final long[] indexBounds = getIndexBounds(context, fieldName); if (cq instanceof PointRangeQuery) { final PointRangeQuery prq = (PointRangeQuery) cq; - // Ensure that the query and aggregation are on the same field - if (prq.getField().equals(fieldName)) { - return new long[] { - // Minimum bound for aggregation is the max between query and global - Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]), - // Maximum bound for aggregation is the min between query and global - Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]) }; - } + final long[] indexBounds = getShardBounds(context, fieldName); + if (indexBounds == null) return null; + return getBoundsWithRangeQuery(prq, fieldName,
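Both bounds helpers above reduce to reading the min/max packed values of the field's point index, folded across all leaves in the shard case. A runnable Lucene sketch of the same bookkeeping over a throwaway in-memory index; this uses the generic Lucene API directly, not OpenSearch's SearchContext plumbing:

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.util.NumericUtils;

public class ShardBoundsSketch {
    public static void main(String[] args) throws Exception {
        ByteBuffersDirectory dir = new ByteBuffersDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (long value : new long[] { 7L, 42L, 19L }) {
                Document doc = new Document();
                doc.add(new LongPoint("timestamp", value));
                writer.addDocument(doc);
            }
        }
        long min = Long.MAX_VALUE, max = Long.MIN_VALUE;
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            // Fold per-segment point-index min/max into shard-wide bounds.
            for (LeafReaderContext leaf : reader.leaves()) {
                PointValues values = leaf.reader().getPointValues("timestamp");
                if (values != null) {
                    min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0));
                    max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0));
                }
            }
        }
        System.out.println("bounds = [" + min + ", " + max + "]"); // bounds = [7, 42]
    }
}
```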
indexBounds); } else if (cq instanceof MatchAllDocsQuery) { - return indexBounds; + return getShardBounds(context, fieldName); + } else if (cq instanceof FieldExistsQuery) { + // when a range query covers all values of a shard, it will be rewritten to a field exists query + if (((FieldExistsQuery) cq).getField().equals(fieldName)) { + return getShardBounds(context, fieldName); + } } - // Check if the top-level query (which may be a PRQ on another field) is functionally match-all - Weight weight = context.searcher().createWeight(context.query(), ScoreMode.COMPLETE_NO_SCORES, 1f); - for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { - if (weight.count(ctx) != ctx.reader().numDocs()) { + + return null; + } + + private static long[] getBoundsWithRangeQuery(PointRangeQuery prq, String fieldName, long[] indexBounds) { + // Ensure that the query and aggregation are on the same field + if (prq.getField().equals(fieldName)) { + // Minimum bound for aggregation is the max between query and global + long lower = Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]); + // Maximum bound for aggregation is the min between query and global + long upper = Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]); + if (lower > upper) { return null; } + return new long[] { lower, upper }; } - return indexBounds; + + return null; } /** * Creates the date range filters for aggregations using the interval, min/max - * bounds and the rounding values + * bounds and prepared rounding */ private static Weight[] createFilterForAggregations( final SearchContext context, + final DateFieldMapper.DateFieldType fieldType, final long interval, final Rounding.Prepared preparedRounding, - final String field, - final DateFieldMapper.DateFieldType fieldType, long low, final long high ) throws IOException { @@ -149,7 +186,10 @@ private static Weight[] createFilterForAggregations( int bucketCount = 0; while (roundedLow <= fieldType.convertNanosToMillis(high)) { bucketCount++; - if (bucketCount > MAX_NUM_FILTER_BUCKETS) return null; + if (bucketCount > MAX_NUM_FILTER_BUCKETS) { + logger.debug("Max number of filters reached [{}], skip the fast filter optimization", MAX_NUM_FILTER_BUCKETS); + return null; + } // Below rounding is needed as the interval could return in // non-rounded values for something like calendar month roundedLow = preparedRounding.round(roundedLow + interval); @@ -176,10 +216,10 @@ private static Weight[] createFilterForAggregations( // is included in the next bucket fieldType.convertRoundedMillisToNanos(roundedLow) - 1, upper, 0); - filters[i++] = context.searcher().createWeight(new PointRangeQuery(field, lower, upper, 1) { + filters[i++] = context.searcher().createWeight(new PointRangeQuery(fieldType.name(), lower, upper, 1) { @Override protected String toString(int dimension, byte[] value) { - return null; + return Long.toString(LongPoint.decodeDimension(value, 0)); } }, ScoreMode.COMPLETE_NO_SCORES, 1); } @@ -189,16 +229,24 @@ protected String toString(int dimension, byte[] value) { } /** - * Context object to do fast filter optimization + * Context object for fast filter optimization + * <p> + * Usage: first set aggregation type, then check isRewriteable, then buildFastFilter */ public static class FastFilterContext { + private boolean rewriteable = false; private Weight[] filters = null; - public AggregationType aggregationType; + private boolean filtersBuiltAtShardLevel = false; - public FastFilterContext() {} + private
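getBoundsWithRangeQuery is a range intersection: the lower aggregation bound is the max of the query's lower bound and the shard minimum, the upper bound is the min of the two maxima, and a crossed pair means the query matches nothing in this shard. The arithmetic in isolation, as a hypothetical helper over plain longs:

```java
public class BoundsIntersectSketch {
    // Overlap of the query range and the shard's [min, max]; null when they are disjoint.
    static long[] intersect(long queryLow, long queryHigh, long shardMin, long shardMax) {
        long lower = Math.max(queryLow, shardMin);
        long upper = Math.min(queryHigh, shardMax);
        return lower > upper ? null : new long[] { lower, upper };
    }

    public static void main(String[] args) {
        long[] overlap = intersect(10, 100, 50, 200);
        System.out.println(overlap[0] + ".." + overlap[1]);      // 50..100
        System.out.println(intersect(10, 20, 50, 200) == null);  // true: no optimization possible
    }
}
```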
AggregationType aggregationType; + private final SearchContext context; - private void setFilters(Weight[] filters) { - this.filters = filters; + public FastFilterContext(SearchContext context) { + this.context = context; + } + + public AggregationType getAggregationType() { + return aggregationType; } public void setAggregationType(AggregationType aggregationType) { @@ -206,119 +254,147 @@ public void setAggregationType(AggregationType aggregationType) { } public boolean isRewriteable(final Object parent, final int subAggLength) { - return aggregationType.isRewriteable(parent, subAggLength); + boolean rewriteable = aggregationType.isRewriteable(parent, subAggLength); + logger.debug("Fast filter rewriteable: {} for shard {}", rewriteable, context.indexShard().shardId()); + this.rewriteable = rewriteable; + return rewriteable; + } + + public void buildFastFilter() throws IOException { + assert filters == null : "Filters should only be built once, but they are already built"; + this.filters = this.aggregationType.buildFastFilter(context); + if (filters != null) { + logger.debug("Fast filter built for shard {}", context.indexShard().shardId()); + filtersBuiltAtShardLevel = true; + } } /** - * This filter build method is for date histogram aggregation type - * - * @param computeBounds get the lower and upper bound of the field in a shard search - * @param roundingFunction produce Rounding that contains interval of date range. - * Rounding is computed dynamically using the bounds in AutoDateHistogram - * @param preparedRoundingSupplier produce PreparedRounding to round values at call-time + * Build filters for a segment */ - public void buildFastFilter( - SearchContext context, - CheckedFunction<DateHistogramAggregationType, long[], IOException> computeBounds, - Function<long[], Rounding> roundingFunction, - Supplier<Rounding.Prepared> preparedRoundingSupplier - ) throws IOException { - assert this.aggregationType instanceof DateHistogramAggregationType; - DateHistogramAggregationType aggregationType = (DateHistogramAggregationType) this.aggregationType; - DateFieldMapper.DateFieldType fieldType = aggregationType.getFieldType(); - final long[] bounds = computeBounds.apply(aggregationType); - if (bounds == null) return; - - final Rounding rounding = roundingFunction.apply(bounds); - final OptionalLong intervalOpt = Rounding.getInterval(rounding); - if (intervalOpt.isEmpty()) return; - final long interval = intervalOpt.getAsLong(); - - // afterKey is the last bucket key in previous response, while the bucket key - // is the start of the bucket values, so add the interval - if (aggregationType instanceof CompositeAggregationType && ((CompositeAggregationType) aggregationType).afterKey != -1) { - bounds[0] = ((CompositeAggregationType) aggregationType).afterKey + interval; + public Weight[] buildFastFilter(LeafReaderContext leaf) throws IOException { + Weight[] filters = this.aggregationType.buildFastFilter(leaf, context); + if (filters != null) { + logger.debug("Fast filter built for shard {} segment {}", context.indexShard().shardId(), leaf.ord); } - - final Weight[] filters = FastFilterRewriteHelper.createFilterForAggregations( - context, - interval, - preparedRoundingSupplier.get(), - fieldType.name(), - fieldType, - bounds[0], - bounds[1] - ); - this.setFilters(filters); + return filters; } } /** * Different types have different pre-conditions, filter building logic, etc.
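FastFilterContext's contract is ordered: set the aggregation type, check isRewriteable, then buildFastFilter, recording whether shard-level filters were built so the per-segment fallback knows whether to try again. A toy state machine capturing that ordering with stand-in types (no Lucene Weight involved):

```java
public class FastFilterLifecycleSketch {
    interface AggregationType {
        boolean isRewriteable();

        long[] buildFilters(); // stand-in for Weight[]
    }

    static class Context {
        private AggregationType aggregationType;
        private boolean rewriteable;
        private long[] filters;
        private boolean filtersBuiltAtShardLevel;

        void setAggregationType(AggregationType type) {
            this.aggregationType = type;
        }

        boolean isRewriteable() {
            rewriteable = aggregationType.isRewriteable();
            return rewriteable;
        }

        void buildFastFilter() {
            assert filters == null : "Filters should only be built once";
            filters = aggregationType.buildFilters();
            filtersBuiltAtShardLevel = filters != null; // segment-level code consults this flag
        }
    }

    public static void main(String[] args) {
        Context ctx = new Context();
        ctx.setAggregationType(new AggregationType() {
            public boolean isRewriteable() { return true; }

            public long[] buildFilters() { return new long[] { 1L, 2L }; }
        });
        if (ctx.isRewriteable()) {
            ctx.buildFastFilter();
        }
        System.out.println(ctx.filtersBuiltAtShardLevel); // true
    }
}
```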
*/ - public interface AggregationType { + interface AggregationType { + boolean isRewriteable(Object parent, int subAggLength); + + Weight[] buildFastFilter(SearchContext ctx) throws IOException; + + Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext ctx) throws IOException; + + default int getSize() { + return Integer.MAX_VALUE; + } } /** * For date histogram aggregation */ - public static class DateHistogramAggregationType implements AggregationType { + public static abstract class AbstractDateHistogramAggregationType implements AggregationType { private final MappedFieldType fieldType; private final boolean missing; private final boolean hasScript; + private LongBounds hardBounds; - public DateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { this.fieldType = fieldType; this.missing = missing; this.hasScript = hasScript; } + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + this(fieldType, missing, hasScript); + this.hardBounds = hardBounds; + } + @Override public boolean isRewriteable(Object parent, int subAggLength) { if (parent == null && subAggLength == 0 && !missing && !hasScript) { - return fieldType != null && fieldType instanceof DateFieldMapper.DateFieldType; + if (fieldType != null && fieldType instanceof DateFieldMapper.DateFieldType) { + return fieldType.isSearchable(); + } } return false; } - public DateFieldMapper.DateFieldType getFieldType() { - assert fieldType instanceof DateFieldMapper.DateFieldType; - return (DateFieldMapper.DateFieldType) fieldType; + @Override + public Weight[] buildFastFilter(SearchContext context) throws IOException { + long[] bounds = getDateHistoAggBounds(context, fieldType.name()); + logger.debug("Bounds are {} for shard {}", bounds, context.indexShard().shardId()); + return buildFastFilter(context, bounds); } - } - /** - * For composite aggregation with date histogram as a source - */ - public static class CompositeAggregationType extends DateHistogramAggregationType { - private final RoundingValuesSource valuesSource; - private long afterKey = -1L; - private final int size; - - public CompositeAggregationType( - CompositeValuesSourceConfig[] sourceConfigs, - CompositeKey rawAfterKey, - List<DocValueFormat> formats, - int size - ) { - super(sourceConfigs[0].fieldType(), sourceConfigs[0].missingBucket(), sourceConfigs[0].hasScript()); - this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); - this.size = size; - if (rawAfterKey != null) { - assert rawAfterKey.size() == 1 && formats.size() == 1; - this.afterKey = formats.get(0).parseLong(rawAfterKey.get(0).toString(), false, () -> { - throw new IllegalArgumentException("now() is not supported in [after] key"); - }); + @Override + public Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext context) throws IOException { + long[] bounds = getSegmentBounds(leaf, fieldType.name()); + logger.debug("Bounds are {} for shard {} segment {}", bounds, context.indexShard().shardId(), leaf.ord); + return buildFastFilter(context, bounds); + } + + private Weight[] buildFastFilter(SearchContext context, long[] bounds) throws IOException { + bounds = processHardBounds(bounds); + if (bounds == null) { + return null; + } + assert bounds[0] <= bounds[1] : "Low bound should be less than high bound"; + + final Rounding rounding = 
getRounding(bounds[0], bounds[1]); + final OptionalLong intervalOpt = Rounding.getInterval(rounding); + if (intervalOpt.isEmpty()) { + return null; } + final long interval = intervalOpt.getAsLong(); + + // process the after key of composite agg + processAfterKey(bounds, interval); + + return FastFilterRewriteHelper.createFilterForAggregations( + context, + (DateFieldMapper.DateFieldType) fieldType, + interval, + getRoundingPrepared(), + bounds[0], + bounds[1] + ); } - public Rounding getRounding() { - return valuesSource.getRounding(); + protected abstract Rounding getRounding(final long low, final long high); + + protected abstract Rounding.Prepared getRoundingPrepared(); + + protected void processAfterKey(long[] bound, long interval) {} + + protected long[] processHardBounds(long[] bounds) { + if (bounds != null) { + // Update min/max limit if user specified any hard bounds + if (hardBounds != null) { + if (hardBounds.getMin() > bounds[0]) { + bounds[0] = hardBounds.getMin(); + } + if (hardBounds.getMax() - 1 < bounds[1]) { + bounds[1] = hardBounds.getMax() - 1; // hard bounds max is exclusive + } + if (bounds[0] > bounds[1]) { + return null; + } + } + } + return bounds; } - public Rounding.Prepared getRoundingPreparer() { - return valuesSource.getPreparedRounding(); + public DateFieldMapper.DateFieldType getFieldType() { + assert fieldType instanceof DateFieldMapper.DateFieldType; + return (DateFieldMapper.DateFieldType) fieldType; } } @@ -335,7 +411,9 @@ public static long getBucketOrd(long bucketOrd) { } /** - * This is executed for each segment by passing the leaf reader context + * Try to get the bucket doc counts from the fast filters for the aggregation + * <p> + * Usage: invoked at segment level — in getLeafCollector of aggregator * * @param incrementDocCount takes in the bucket key value and the bucket count */ @@ -345,9 +423,38 @@ public static boolean tryFastFilterAggregation( final BiConsumer<Long, Integer> incrementDocCount ) throws IOException { if (fastFilterContext == null) return false; - if (fastFilterContext.filters == null) return false; + if (!fastFilterContext.rewriteable) { + return false; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + logger.debug( + "Shard {} segment {} has at least one document with _doc_count field, skip fast filter optimization", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + return false; + } + + // if no filters built at shard level (see getDateHistoAggBounds method for possible reasons) + // check if the query is functionally match-all at segment level + if (!fastFilterContext.filtersBuiltAtShardLevel && !segmentMatchAll(fastFilterContext.context, ctx)) { + return false; + } + Weight[] filters = fastFilterContext.filters; + if (filters == null) { + logger.debug( + "Shard {} segment {} functionally match all documents. 
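processHardBounds clamps the computed bounds to the user-supplied hard bounds, treating the hard max as exclusive (hence the max - 1) and returning null when the clamped range inverts. The same arithmetic in isolation, with plain longs instead of LongBounds:

```java
public class HardBoundsClampSketch {
    // Clamp [bounds[0], bounds[1]] to hard bounds [hardMin, hardMax); null when disjoint.
    static long[] clamp(long[] bounds, long hardMin, long hardMax) {
        if (hardMin > bounds[0]) {
            bounds[0] = hardMin;
        }
        if (hardMax - 1 < bounds[1]) {
            bounds[1] = hardMax - 1; // hard bounds max is exclusive
        }
        return bounds[0] > bounds[1] ? null : bounds;
    }

    public static void main(String[] args) {
        long[] clamped = clamp(new long[] { 0, 100 }, 10, 50);
        System.out.println(clamped[0] + ".." + clamped[1]);             // 10..49
        System.out.println(clamp(new long[] { 0, 5 }, 10, 50) == null); // true: nothing to aggregate
    }
}
```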
Build the fast filter", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + filters = fastFilterContext.buildFastFilter(ctx); + if (filters == null) { + return false; + } + } - final Weight[] filters = fastFilterContext.filters; final int[] counts = new int[filters.length]; int i; for (i = 0; i < filters.length; i++) { @@ -360,26 +467,31 @@ public static boolean tryFastFilterAggregation( } int s = 0; - int size = Integer.MAX_VALUE; + int size = fastFilterContext.aggregationType.getSize(); for (i = 0; i < filters.length; i++) { if (counts[i] > 0) { long bucketKey = i; // the index of filters is the key for filters aggregation - if (fastFilterContext.aggregationType instanceof DateHistogramAggregationType) { - final DateFieldMapper.DateFieldType fieldType = ((DateHistogramAggregationType) fastFilterContext.aggregationType) - .getFieldType(); + if (fastFilterContext.aggregationType instanceof AbstractDateHistogramAggregationType) { + final DateFieldMapper.DateFieldType fieldType = + ((AbstractDateHistogramAggregationType) fastFilterContext.aggregationType).getFieldType(); bucketKey = fieldType.convertNanosToMillis( NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) ); - if (fastFilterContext.aggregationType instanceof CompositeAggregationType) { - size = ((CompositeAggregationType) fastFilterContext.aggregationType).size; - } } incrementDocCount.accept(bucketKey, counts[i]); s++; - if (s > size) return true; + if (s > size) { + break; + } } } + logger.debug("Fast filter optimization applied to shard {} segment {}", fastFilterContext.context.indexShard().shardId(), ctx.ord); return true; } + + private static boolean segmentMatchAll(SearchContext ctx, LeafReaderContext leafCtx) throws IOException { + Weight weight = ctx.searcher().createWeight(ctx.query(), ScoreMode.COMPLETE_NO_SCORES, 1f); + return weight != null && weight.count(leafCtx) == leafCtx.reader().numDocs(); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 5e8791441d83a..e57acba5bc0ad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -44,7 +44,9 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.bucket.filter.FilterAggregatorFactory; import org.opensearch.search.aggregations.bucket.nested.NestedAggregatorFactory; +import org.opensearch.search.aggregations.bucket.nested.ReverseNestedAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import java.io.IOException; @@ -240,14 +242,16 @@ public BucketCardinality bucketCardinality() { * this aggregator or the instance of the parent's factory that is incompatible with * the composite aggregation. 
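Editor's note on the FastFilterRewriteHelper changes above: the rewrite turns a date histogram into a set of range counts, and it only fires when per-document collection is provably unnecessary (no _doc_count fields, and either shard-level filters already exist or the segment is functionally match-all). The following plain-Java sketch shows roughly why that is profitable; sorted timestamps plus binary search stand in for Lucene's point tree and Weight#count, and every name here is illustrative rather than the PR's API.

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

public class BucketCountSketch {

    /** Count docs per [start, start + interval) bucket without visiting documents one by one. */
    static Map<Long, Integer> countBuckets(long[] sortedMillis, long interval) {
        Map<Long, Integer> counts = new LinkedHashMap<>();
        long first = Math.floorDiv(sortedMillis[0], interval) * interval;
        long last = sortedMillis[sortedMillis.length - 1];
        for (long start = first; start <= last; start += interval) {
            int from = lowerBound(sortedMillis, start);
            int to = lowerBound(sortedMillis, start + interval);
            if (to > from) {
                counts.put(start, to - from); // bucket key is the rounded-down timestamp
            }
        }
        return counts;
    }

    /** Index of the first element >= target, i.e. the insertion point. */
    static int lowerBound(long[] values, long target) {
        int idx = Arrays.binarySearch(values, target);
        if (idx < 0) {
            return -idx - 1;
        }
        while (idx > 0 && values[idx - 1] == target) {
            idx--; // step back over duplicates
        }
        return idx;
    }

    public static void main(String[] args) {
        long[] timestamps = { 5, 7, 10, 11, 19, 20, 31 };
        System.out.println(countBuckets(timestamps, 10)); // {0=2, 10=3, 20=1, 30=1}
    }
}
```

Each bucket costs two binary searches instead of a full scan, which mirrors the win the helper aims for by counting matches of one range filter per rounded interval.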
*/ - private AggregatorFactory checkParentIsNullOrNested(AggregatorFactory factory) { + private static AggregatorFactory checkParentIsSafe(AggregatorFactory factory) { if (factory == null) { return null; - } else if (factory instanceof NestedAggregatorFactory) { - return checkParentIsNullOrNested(factory.getParent()); - } else { - return factory; - } + } else if (factory instanceof NestedAggregatorFactory + || factory instanceof FilterAggregatorFactory + || factory instanceof ReverseNestedAggregatorFactory) { + return checkParentIsSafe(factory.getParent()); + } else { + return factory; + } } private static void validateSources(List<CompositeValuesSourceBuilder<?>> sources) { @@ -278,7 +282,7 @@ protected AggregatorFactory doBuild( AggregatorFactory parent, AggregatorFactories.Builder subfactoriesBuilder ) throws IOException { - AggregatorFactory invalid = checkParentIsNullOrNested(parent); + AggregatorFactory invalid = checkParentIsSafe(parent); if (invalid != null) { throw new IllegalArgumentException( "[composite] aggregation cannot be used with a parent aggregation of" diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 2ff79fb623def..4af14ab014db5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -80,6 +80,7 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - return true; + // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return false; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 822b8a6c4b118..b97c814cdf645 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -164,24 +164,55 @@ final class CompositeAggregator extends BucketsAggregator { this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey); this.rawAfterKey = rawAfterKey; - fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); if (!FastFilterRewriteHelper.isCompositeAggRewriteable(sourceConfigs)) return; - fastFilterContext.setAggregationType( - new FastFilterRewriteHelper.CompositeAggregationType(sourceConfigs, rawAfterKey, formats, size) - ); + fastFilterContext.setAggregationType(new CompositeAggregationType()); if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { - // bucketOrds is the data structure for saving date histogram results + // bucketOrds is used for saving date histogram results bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), CardinalityUpperBound.ONE); - // Currently the filter rewrite is only supported for date histograms - FastFilterRewriteHelper.CompositeAggregationType aggregationType = - (FastFilterRewriteHelper.CompositeAggregationType) fastFilterContext.aggregationType; - preparedRounding = aggregationType.getRoundingPreparer(); - fastFilterContext.buildFastFilter( - 
context, - fc -> FastFilterRewriteHelper.getAggregationBounds(context, fc.getFieldType().name()), - x -> aggregationType.getRounding(), - () -> preparedRounding - ); + preparedRounding = ((CompositeAggregationType) fastFilterContext.getAggregationType()).getRoundingPrepared(); + fastFilterContext.buildFastFilter(); + } + } + + /** + * Currently the filter rewrite is only supported for date histograms + */ + private class CompositeAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + private final RoundingValuesSource valuesSource; + private long afterKey = -1L; + + public CompositeAggregationType() { + super(sourceConfigs[0].fieldType(), sourceConfigs[0].missingBucket(), sourceConfigs[0].hasScript()); + this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); + if (rawAfterKey != null) { + assert rawAfterKey.size() == 1 && formats.size() == 1; + this.afterKey = formats.get(0).parseLong(rawAfterKey.get(0).toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); + } + } + + public Rounding getRounding(final long low, final long high) { + return valuesSource.getRounding(); + } + + public Rounding.Prepared getRoundingPrepared() { + return valuesSource.getPreparedRounding(); + } + + @Override + protected void processAfterKey(long[] bound, long interval) { + // afterKey is the last bucket key in previous response, and the bucket key + // is the minimum of all values in the bucket, so need to add the interval + if (afterKey != -1L) { + bound[0] = afterKey + interval; + } + } + + @Override + public int getSize() { + return size; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index a0a636c121e12..db21b384c77ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -56,7 +56,7 @@ public class FilterAggregatorFactory extends AggregatorFactory { private Weight weight; - private Query filter; + private final Query filter; public FilterAggregatorFactory( String name, @@ -85,7 +85,7 @@ public Weight getWeight() { try { weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { - throw new AggregationInitializationException("Failed to initialse filter", e); + throw new AggregationInitializationException("Failed to initialise filter", e); } } return weight; @@ -98,7 +98,7 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map<String, Object> metadata ) throws IOException { - return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, cardinality, metadata); + return new FilterAggregator(name, this::getWeight, factories, searchContext, parent, cardinality, metadata); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index 0ea820abbedf4..12aefc540e75c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -42,6 +42,7 @@ import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; import org.opensearch.core.common.util.ByteArray; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -156,45 +157,53 @@ private AutoDateHistogramAggregator( this.roundingPreparer = roundingPreparer; this.preparedRounding = prepareRounding(0); - fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); fastFilterContext.setAggregationType( - new FastFilterRewriteHelper.DateHistogramAggregationType( + new AutoHistogramAggregationType( valuesSourceConfig.fieldType(), valuesSourceConfig.missing() != null, valuesSourceConfig.script() != null ) ); if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { - fastFilterContext.buildFastFilter( - context, - fc -> FastFilterRewriteHelper.getAggregationBounds(context, fc.getFieldType().name()), - b -> getMinimumRounding(b[0], b[1]), - // Passing prepared rounding as supplier to ensure the correct prepared - // rounding is set as it is done during getMinimumRounding - () -> preparedRounding - ); + fastFilterContext.buildFastFilter(); } } - private Rounding getMinimumRounding(final long low, final long high) { - // max - min / targetBuckets = bestDuration - // find the right innerInterval this bestDuration belongs to - // since we cannot exceed targetBuckets, bestDuration should go up, - // so the right innerInterval should be an upper bound - long bestDuration = (high - low) / targetBuckets; - while (roundingIdx < roundingInfos.length - 1) { - final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; - final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1]; - // If the interval duration is covered by the maximum inner interval, - // we can start with this outer interval for creating the buckets - if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) { - break; + private class AutoHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public AutoHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + super(fieldType, missing, hasScript); + } + + @Override + protected Rounding getRounding(final long low, final long high) { + // max - min / targetBuckets = bestDuration + // find the right innerInterval this bestDuration belongs to + // since we cannot exceed targetBuckets, bestDuration should go up, + // so the right innerInterval should be an upper bound + long bestDuration = (high - low) / targetBuckets; + // reset so this function is idempotent + roundingIdx = 0; + while (roundingIdx < roundingInfos.length - 1) { + final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; + final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1]; + // If the interval duration is covered by the maximum inner interval, + // we can start with this outer interval for creating the buckets + if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) { + break; + } + roundingIdx++; } - roundingIdx++; + + preparedRounding = prepareRounding(roundingIdx); + return roundingInfos[roundingIdx].rounding; } - 
preparedRounding = prepareRounding(roundingIdx); - return roundingInfos[roundingIdx].rounding; + @Override + protected Prepared getRoundingPrepared() { + return preparedRounding; + } } protected abstract LongKeyedBucketOrds getBucketOrds(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index b95bd093b82a6..0e830106c8284 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -39,6 +39,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -115,29 +116,35 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); - fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); fastFilterContext.setAggregationType( - new FastFilterRewriteHelper.DateHistogramAggregationType( + new DateHistogramAggregationType( valuesSourceConfig.fieldType(), valuesSourceConfig.missing() != null, - valuesSourceConfig.script() != null + valuesSourceConfig.script() != null, + hardBounds ) ); if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { - fastFilterContext.buildFastFilter(context, this::computeBounds, x -> rounding, () -> preparedRounding); + fastFilterContext.buildFastFilter(); } } - private long[] computeBounds(final FastFilterRewriteHelper.DateHistogramAggregationType fieldContext) throws IOException { - final long[] bounds = FastFilterRewriteHelper.getAggregationBounds(context, fieldContext.getFieldType().name()); - if (bounds != null) { - // Update min/max limit if user specified any hard bounds - if (hardBounds != null) { - bounds[0] = Math.max(bounds[0], hardBounds.getMin()); - bounds[1] = Math.min(bounds[1], hardBounds.getMax() - 1); // hard bounds max is exclusive - } + private class DateHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public DateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + super(fieldType, missing, hasScript, hardBounds); + } + + @Override + protected Rounding getRounding(long low, long high) { + return rounding; + } + + @Override + protected Rounding.Prepared getRoundingPrepared() { + return preparedRounding; } - return bounds; } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5ed899408ab40..69fda2f3f6133 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -35,8 +35,13 @@ import org.apache.lucene.index.DocValues; import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; @@ -46,6 +51,7 @@ import org.opensearch.common.util.LongHash; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; @@ -73,6 +79,7 @@ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * An aggregator of string values that relies on global ordinals in order to build buckets. @@ -85,6 +92,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; + private final String fieldName; + private Weight weight; private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; protected int segmentsWithSingleValuedOrds = 0; @@ -136,16 +145,105 @@ public GlobalOrdinalsStringTermsAggregator( return new DenseGlobalOrds(); }); } + this.fieldName = (valuesSource instanceof ValuesSource.Bytes.WithOrdinals.FieldData) + ? ((ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource).getIndexFieldName() + : null; } String descriptCollectionStrategy() { return collectionStrategy.describe(); } + public void setWeight(Weight weight) { + this.weight = weight; + } + + /** + Read doc frequencies directly from indexed terms in the segment to skip iterating through individual documents + @param ctx The LeafReaderContext to collect terms from + @param globalOrds The SortedSetDocValues for the field's ordinals + @param ordCountConsumer A consumer to accept collected term frequencies + @return A LeafBucketCollector implementation with collection termination, since collection is complete + @throws IOException If an I/O error occurs during reading + */ + LeafBucketCollector termDocFreqCollector( + LeafReaderContext ctx, + SortedSetDocValues globalOrds, + BiConsumer<Long, Integer> ordCountConsumer + ) throws IOException { + if (weight == null) { + // Weight not assigned - cannot use this optimization + return null; + } else { + if (weight.count(ctx) == 0) { + // No documents matches top level query on this segment, we can skip the segment entirely + return LeafBucketCollector.NO_OP_COLLECTOR; + } else if (weight.count(ctx) != ctx.reader().maxDoc()) { + // weight.count(ctx) == ctx.reader().maxDoc() implies there are no deleted documents and + // top-level query matches all docs in the segment + return null; + } + } + + Terms segmentTerms = ctx.reader().terms(this.fieldName); + if (segmentTerms == null) { + // Field is not indexed. 
+ return null; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + // This segment has at least one document with the _doc_count field. + return null; + } + + TermsEnum indexTermsEnum = segmentTerms.iterator(); + BytesRef indexTerm = indexTermsEnum.next(); + TermsEnum globalOrdinalTermsEnum = globalOrds.termsEnum(); + BytesRef ordinalTerm = globalOrdinalTermsEnum.next(); + + // Iterate over the terms in the segment, look for matches in the global ordinal terms, + // and increment bucket count when segment terms match global ordinal terms. + while (indexTerm != null && ordinalTerm != null) { + int compare = indexTerm.compareTo(ordinalTerm); + if (compare == 0) { + if (acceptedGlobalOrdinals.test(globalOrdinalTermsEnum.ord())) { + ordCountConsumer.accept(globalOrdinalTermsEnum.ord(), indexTermsEnum.docFreq()); + } + indexTerm = indexTermsEnum.next(); + ordinalTerm = globalOrdinalTermsEnum.next(); + } else if (compare < 0) { + indexTerm = indexTermsEnum.next(); + } else { + ordinalTerm = globalOrdinalTermsEnum.next(); + } + } + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); collectionStrategy.globalOrdsReady(globalOrds); + + if (collectionStrategy instanceof DenseGlobalOrds + && this.resultStrategy instanceof StandardTermsResults + && sub == LeafBucketCollector.NO_OP_COLLECTOR) { + LeafBucketCollector termDocFreqCollector = termDocFreqCollector( + ctx, + globalOrds, + (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; @@ -343,9 +441,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx); segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount()); assert sub == LeafBucketCollector.NO_OP_COLLECTOR; - final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); mapping = valuesSource.globalOrdinalsMapping(ctx); - // Dense mode doesn't support include/exclude so we don't have to check it here. 
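A reviewer-oriented distillation of the termDocFreqCollector loop just added: two sorted term streams are walked in lockstep, and on an exact match the segment's document frequency is credited to the global ordinal. This sketch substitutes plain arrays for TermsEnum and SortedSetDocValues#termsEnum(); the names are illustrative, and note that the real method additionally requires weight.count(ctx) == maxDoc (the top-level query matches every doc, with no deletions) and the absence of a _doc_count field, since raw docFreq would otherwise miscount.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class TermDocFreqMergeSketch {

    /** Credit each segment term's docFreq to the matching global ordinal via a two-pointer merge. */
    static Map<Integer, Integer> mergeDocFreqs(String[] segTerms, int[] segDocFreq, String[] ordTerms) {
        Map<Integer, Integer> ordCounts = new LinkedHashMap<>();
        int i = 0;
        int ord = 0;
        while (i < segTerms.length && ord < ordTerms.length) {
            int cmp = segTerms[i].compareTo(ordTerms[ord]);
            if (cmp == 0) {
                ordCounts.put(ord, segDocFreq[i]); // terms are unique per segment, one hit per ordinal
                i++;
                ord++;
            } else if (cmp < 0) {
                i++;   // segment term not present in the ordinal terms
            } else {
                ord++; // ordinal term not present in this segment
            }
        }
        return ordCounts;
    }

    public static void main(String[] args) {
        String[] segTerms = { "apple", "kiwi", "pear" };
        int[] segDocFreq = { 3, 1, 2 };
        String[] ordTerms = { "apple", "banana", "kiwi", "pear" };
        System.out.println(mergeDocFreqs(segTerms, segDocFreq, ordTerms)); // {0=3, 2=1, 3=2}
    }
}
```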
+ + if (this.resultStrategy instanceof StandardTermsResults) { + LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector( + ctx, + segmentOrds, + (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + + final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index da1d9961ed81b..d21737a8366b2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -227,6 +227,10 @@ public String toString() { return "anon SortedNumericDoubleValues of [" + super.toString() + "]"; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index 1a76183ac1a2d..1f4dd429e094e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -244,6 +244,10 @@ public FieldData(IndexOrdinalsFieldData indexFieldData) { this.indexFieldData = indexFieldData; } + public String getIndexFieldName() { + return this.indexFieldData.getFieldName(); + } + @Override public SortedBinaryDocValues bytesValues(LeafReaderContext context) { final LeafOrdinalsFieldData atomicFieldData = indexFieldData.load(context); @@ -576,6 +580,11 @@ public boolean advanceExact(int target) throws IOException { } return false; } + + @Override + public int advance(int target) throws IOException { + return doubleValues.advance(target); + } } } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 434e630893f25..1a5a9dc6d1f03 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -117,6 +117,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField IGNORE_FAILURE_FIELD = new ParseField("ignore_failure"); public static final ParseField SORT_FIELD = new ParseField("sort"); public static final ParseField TRACK_SCORES_FIELD = new ParseField("track_scores"); + public static final ParseField INCLUDE_NAMED_QUERIES_SCORE = new ParseField("include_named_queries_score"); public static final ParseField TRACK_TOTAL_HITS_FIELD = new ParseField("track_total_hits"); public static final ParseField INDICES_BOOST_FIELD = new ParseField("indices_boost"); public static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations"); @@ -175,6 +176,8 @@ public static HighlightBuilder highlight() { private boolean trackScores = false; + private Boolean includeNamedQueriesScore; + private Integer trackTotalHitsUpTo; private SearchAfterBuilder searchAfterBuilder; @@ -276,6 +279,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { 
searchPipelineSource = in.readMap(); } } + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + includeNamedQueriesScore = in.readOptionalBoolean(); + } } @Override @@ -341,6 +347,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(searchPipelineSource); } } + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeOptionalBoolean(includeNamedQueriesScore); + } } /** @@ -568,6 +577,22 @@ public SearchSourceBuilder trackScores(boolean trackScores) { return this; } + /** + * Applies when there are named queries, to also return their scores. + * Defaults to {@code false}. + */ + public SearchSourceBuilder includeNamedQueriesScores(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + /** + * Indicates whether scores will be returned as part of every matched named query. + */ + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore != null && includeNamedQueriesScore; + } + /** * Indicates whether scores will be tracked for this request. */ @@ -1103,6 +1128,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.terminateAfter = terminateAfter; rewrittenBuilder.timeout = timeout; rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.includeNamedQueriesScore = includeNamedQueriesScore; rewrittenBuilder.trackTotalHitsUpTo = trackTotalHitsUpTo; rewrittenBuilder.version = version; rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; @@ -1155,6 +1181,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { trackScores = parser.booleanValue(); + } else if (INCLUDE_NAMED_QUERIES_SCORE.match(currentFieldName, parser.getDeprecationHandler())) { + includeNamedQueriesScore = parser.booleanValue(); } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_BOOLEAN || (token == XContentParser.Token.VALUE_STRING && Booleans.isBoolean(parser.text()))) { @@ -1418,6 +1446,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(TRACK_SCORES_FIELD.getPreferredName(), true); } + if (includeNamedQueriesScore != null) { + builder.field(INCLUDE_NAMED_QUERIES_SCORE.getPreferredName(), includeNamedQueriesScore); + } + if (trackTotalHitsUpTo != null) { builder.field(TRACK_TOTAL_HITS_FIELD.getPreferredName(), trackTotalHitsUpTo); } @@ -1749,6 +1781,7 @@ public int hashCode() { terminateAfter, timeout, trackScores, + includeNamedQueriesScore, version, seqNoAndPrimaryTerm, profile, @@ -1791,6 +1824,7 @@ public boolean equals(Object obj) { && Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) + && Objects.equals(includeNamedQueriesScore, other.includeNamedQueriesScore) && Objects.equals(version, other.version) && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm) && Objects.equals(profile, other.profile) diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java index 7e36ace9e2112..780a6f35524ea 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java @@ -188,6 +188,14 @@ public boolean fetchScores() { return 
searchContext.sort() != null && searchContext.trackScores(); } + public boolean includeNamedQueriesScore() { + return searchContext.includeNamedQueriesScore(); + } + + public boolean hasInnerHits() { + return searchContext.hasInnerHits(); + } + /** * Configuration for returning inner hits */ @@ -209,6 +217,10 @@ public FetchFieldsContext fetchFieldsContext() { return searchContext.fetchFieldsContext(); } + public boolean hasScriptFields() { + return searchContext.hasScriptFields(); + } + /** * Configuration for script fields */ diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index a842c0f1adc6e..1698f41caaf2b 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -91,7 +91,7 @@ /** * Fetch phase of a search request, used to fetch the actual top matching documents to be returned to the client, identified - * after reducing all of the matches returned by the query phase + * after reducing all the matches returned by the query phase * * @opensearch.api */ diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java index 5855a0b3217f3..fa80bb04c77f5 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java @@ -119,6 +119,11 @@ public String getName() { return name; } + @Override + public boolean hasInnerHits() { + return childInnerHits != null; + } + @Override public InnerHitsContext innerHits() { return childInnerHits; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java index 0b07dc35f13bb..cadad8529da9d 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java @@ -64,7 +64,7 @@ public InnerHitsPhase(FetchPhase fetchPhase) { @Override public FetchSubPhaseProcessor getProcessor(FetchContext searchContext) { - if (searchContext.innerHits() == null) { + if (searchContext.hasInnerHits() == false) { return null; } Map<String, InnerHitsContext.InnerHitSubContext> innerHits = searchContext.innerHits().getInnerHits(); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java index 6c589438d6b4c..406d9c8b4bc03 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java @@ -28,12 +28,12 @@ * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. 
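The hasInnerHits() and hasScriptFields() accessors introduced above are worth a note: on the base SearchContext, innerHits() creates the context object on first call (see the SearchContext hunk further down), so the old innerHits() == null guard in InnerHitsPhase could never fire and allocated as a side effect; sub-contexts override hasInnerHits() to report their real state. A minimal sketch of the pattern, with illustrative names:

```java
import java.util.ArrayList;
import java.util.List;

public class LazyContextSketch {

    private List<String> innerHits; // created on first use

    /** Lazily creates the context, so callers can never observe null from this getter. */
    public List<String> innerHits() {
        if (innerHits == null) {
            innerHits = new ArrayList<>();
        }
        return innerHits;
    }

    /** Existence check that neither allocates nor mutates state. */
    public boolean hasInnerHits() {
        return innerHits != null;
    }

    public static void main(String[] args) {
        LazyContextSketch ctx = new LazyContextSketch();
        // A guard written as `ctx.innerHits() == null` is always false and allocates;
        // the dedicated `has` accessor avoids both problems.
        System.out.println(ctx.hasInnerHits()); // false
        ctx.innerHits().add("comments");
        System.out.println(ctx.hasInnerHits()); // true
    }
}
```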
*/ - package org.opensearch.search.fetch.subphase; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -45,6 +45,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -67,25 +68,69 @@ public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOExcept if (namedQueries.isEmpty()) { return null; } + + Map<String, Weight> weights = prepareWeights(context, namedQueries); + + return context.includeNamedQueriesScore() ? createScoringProcessor(weights) : createNonScoringProcessor(weights); + } + + private Map<String, Weight> prepareWeights(FetchContext context, Map<String, Query> namedQueries) throws IOException { Map<String, Weight> weights = new HashMap<>(); + ScoreMode scoreMode = context.includeNamedQueriesScore() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; for (Map.Entry<String, Query> entry : namedQueries.entrySet()) { - weights.put( - entry.getKey(), - context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), ScoreMode.COMPLETE_NO_SCORES, 1) - ); + weights.put(entry.getKey(), context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), scoreMode, 1)); } + return weights; + } + + private FetchSubPhaseProcessor createScoringProcessor(Map<String, Weight> weights) { return new FetchSubPhaseProcessor() { + final Map<String, Scorer> matchingScorers = new HashMap<>(); + + @Override + public void setNextReader(LeafReaderContext readerContext) throws IOException { + matchingScorers.clear(); + for (Map.Entry<String, Weight> entry : weights.entrySet()) { + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Scorer scorer = scorerSupplier.get(0L); + if (scorer != null) { + matchingScorers.put(entry.getKey(), scorer); + } + } + } + } + + @Override + public void process(HitContext hitContext) throws IOException { + Map<String, Float> matches = new LinkedHashMap<>(); + int docId = hitContext.docId(); + for (Map.Entry<String, Scorer> entry : matchingScorers.entrySet()) { + Scorer scorer = entry.getValue(); + if (scorer.iterator().docID() < docId) { + scorer.iterator().advance(docId); + } + if (scorer.iterator().docID() == docId) { + matches.put(entry.getKey(), scorer.score()); + } + } + hitContext.hit().matchedQueriesWithScores(matches); + } + }; + } - final Map<String, Bits> matchingIterators = new HashMap<>(); + private FetchSubPhaseProcessor createNonScoringProcessor(Map<String, Weight> weights) { + return new FetchSubPhaseProcessor() { + final Map<String, Bits> matchingBits = new HashMap<>(); @Override public void setNextReader(LeafReaderContext readerContext) throws IOException { - matchingIterators.clear(); + matchingBits.clear(); for (Map.Entry<String, Weight> entry : weights.entrySet()) { - ScorerSupplier ss = entry.getValue().scorerSupplier(readerContext); - if (ss != null) { - Bits matchingBits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), ss); - matchingIterators.put(entry.getKey(), matchingBits); + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Bits bits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), scorerSupplier); 
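The scoring processor above relies on the doc-id iterator contract: doc ids come back in ascending order and advance(target) moves to the first doc at or beyond target, so the per-hit check only ever moves forward. A self-contained mimic of that contract (plain Java, not Lucene's DocIdSetIterator; the names are illustrative):

```java
public class AdvanceSketch {

    /** Stand-in for a doc-id iterator over ascending doc ids. */
    static final class DocIdIterator {
        private final int[] docs;
        private int pos = 0;

        DocIdIterator(int... sortedDocs) {
            this.docs = sortedDocs;
        }

        int docID() {
            return pos < docs.length ? docs[pos] : Integer.MAX_VALUE; // exhausted sentinel
        }

        int advance(int target) {
            while (pos < docs.length && docs[pos] < target) {
                pos++;
            }
            return docID();
        }
    }

    public static void main(String[] args) {
        DocIdIterator matches = new DocIdIterator(2, 5, 9);
        int[] hitsInDocIdOrder = { 1, 5, 9 }; // the fetch phase visits hits per segment in doc-id order
        for (int docId : hitsInDocIdOrder) {
            if (matches.docID() < docId) {
                matches.advance(docId); // never rewinds, mirroring the processor's loop
            }
            System.out.println("doc " + docId + " matched=" + (matches.docID() == docId));
        }
        // doc 1 matched=false, doc 5 matched=true, doc 9 matched=true
    }
}
```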
+ matchingBits.put(entry.getKey(), bits); } } } @@ -93,15 +138,14 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException { @Override public void process(HitContext hitContext) { List<String> matches = new ArrayList<>(); - int doc = hitContext.docId(); - for (Map.Entry<String, Bits> iterator : matchingIterators.entrySet()) { - if (iterator.getValue().get(doc)) { - matches.add(iterator.getKey()); + int docId = hitContext.docId(); + for (Map.Entry<String, Bits> entry : matchingBits.entrySet()) { + if (entry.getValue().get(docId)) { + matches.add(entry.getKey()); } } hitContext.hit().matchedQueries(matches.toArray(new String[0])); } }; } - } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java index 67d1863050a7b..bee536dbaf7f6 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java @@ -54,7 +54,7 @@ public final class ScriptFieldsPhase implements FetchSubPhase { @Override public FetchSubPhaseProcessor getProcessor(FetchContext context) { - if (context.scriptFields() == null) { + if (context.hasScriptFields() == false) { return null; } List<ScriptFieldsContext.ScriptField> scriptFields = context.scriptFields().fields(); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java index eb5f4f3c14eb2..c06a733203434 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -123,13 +123,27 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc List<Object> textsToHighlight; Analyzer analyzer = context.mapperService().documentMapper().mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset(); + final Integer fieldMaxAnalyzedOffset = field.fieldOptions().maxAnalyzerOffset(); + if (fieldMaxAnalyzedOffset != null && fieldMaxAnalyzedOffset > maxAnalyzedOffset) { + throw new IllegalArgumentException( + "max_analyzer_offset has exceeded [" + + maxAnalyzedOffset + + "] - maximum allowed to be analyzed for highlighting. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + + "] index level setting. " + + "For large texts, indexing with offsets or term vectors is recommended!" 
+ ); + } textsToHighlight = HighlightUtils.loadFieldValues(fieldType, context.getQueryShardContext(), hitContext, fieldContext.forceSource); for (Object textToHighlight : textsToHighlight) { String text = convertFieldValue(fieldType, textToHighlight); int textLength = text.length(); - if (textLength > maxAnalyzedOffset) { + if (fieldMaxAnalyzedOffset != null && textLength > fieldMaxAnalyzedOffset) { + text = text.substring(0, fieldMaxAnalyzedOffset); + } else if (textLength > maxAnalyzedOffset) { throw new IllegalArgumentException( "The length of [" + fieldContext.fieldName diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 403b0b545c113..ec3ed2332d0b8 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -387,6 +387,11 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return null; } } + + @Override + public int count(LeafReaderContext context) throws IOException { + return weight.count(context); + } }; } else { return weight; diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 151ef97a2a141..3a3b46366a6d2 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -340,6 +340,14 @@ public FieldDoc searchAfter() { return in.searchAfter(); } + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + return in.includeNamedQueriesScore(includeNamedQueriesScore); + } + + public boolean includeNamedQueriesScore() { + return in.includeNamedQueriesScore(); + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { return in.parsedPostFilter(postFilter); diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 02837da64dafd..3d13378e58e5d 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -188,6 +188,10 @@ public final void close() { public abstract void highlight(SearchHighlightContext highlight); + public boolean hasInnerHits() { + return innerHitsContext != null; + } + public InnerHitsContext innerHits() { if (innerHitsContext == null) { innerHitsContext = new InnerHitsContext(); @@ -305,6 +309,29 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { public abstract boolean trackScores(); + /** + * Determines whether named queries' scores should be included in the search results. + * By default, this is set to return false, indicating that scores from named queries are not included. + * + * @param includeNamedQueriesScore true to include scores from named queries, false otherwise. + */ + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + // Default implementation does nothing and returns this for chaining. + // Implementations of SearchContext should override this method to actually store the value. + return this; + } + + /** + * Checks if scores from named queries are included in the search results. 
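On the PlainHighlighter hunk above, the precedence is easy to miss: a field-level max_analyzer_offset silently truncates the text before analysis, the index-level limit still fails the request, and a field-level value above the index-level limit is rejected up front. A hedged sketch of just that decision, with illustrative names and messages:

```java
public class HighlightOffsetSketch {

    /** Returns the text that would be analyzed for highlighting, mirroring the precedence above. */
    static String textToAnalyze(String text, Integer fieldMaxAnalyzedOffset, int indexMaxAnalyzedOffset) {
        if (fieldMaxAnalyzedOffset != null && fieldMaxAnalyzedOffset > indexMaxAnalyzedOffset) {
            throw new IllegalArgumentException("field-level max_analyzer_offset may not exceed the index-level limit");
        }
        if (fieldMaxAnalyzedOffset != null && text.length() > fieldMaxAnalyzedOffset) {
            return text.substring(0, fieldMaxAnalyzedOffset); // truncate instead of failing
        }
        if (text.length() > indexMaxAnalyzedOffset) {
            throw new IllegalArgumentException("text exceeds the index-level max_analyzed_offset");
        }
        return text;
    }

    public static void main(String[] args) {
        System.out.println(textToAnalyze("0123456789", 4, 1000));    // 0123
        System.out.println(textToAnalyze("0123456789", null, 1000)); // 0123456789
    }
}
```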
+ * + * @return true if scores from named queries are included, false otherwise. + */ + public boolean includeNamedQueriesScore() { + // Default implementation returns false. + // Implementations of SearchContext should override this method to return the actual value. + return false; + } + public abstract SearchContext trackTotalHitsUpTo(int trackTotalHits); /** diff --git a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java index 55315013ea8c9..b2c97baf78d91 100644 --- a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java @@ -82,6 +82,8 @@ public class SubSearchContext extends FilteredSearchContext { private boolean explain; private boolean trackScores; + + private boolean includeNamedQueriesScore; private boolean version; private boolean seqNoAndPrimaryTerm; @@ -234,6 +236,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { throw new UnsupportedOperationException("Not supported"); diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java index 3bd1c5118b5fb..70cbd8d7ad6c3 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java @@ -78,6 +78,7 @@ public void setDocument(int docId) { this.docId = docId; } + @SuppressWarnings("removal") @Override public ScriptDocValues<?> get(Object key) { // assume its a string... diff --git a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java index 8813865a657dc..906616eb9ba5f 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java @@ -60,6 +60,12 @@ public class SearchLookup { */ private static final int MAX_FIELD_CHAIN_DEPTH = 5; + /** + * This constant should be used in cases when shard id is unknown. + * Mostly it should be used in tests. + */ + public static final int UNKNOWN_SHARD_ID = -1; + /** * The chain of fields for which this lookup was created, used for detecting * loops caused by runtime fields referring to other runtime fields. The chain is empty @@ -74,14 +80,27 @@ public class SearchLookup { private final SourceLookup sourceLookup; private final FieldsLookup fieldsLookup; private final BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup; + private final int shardId; /** - * Create the top level field lookup for a search request. Provides a way to look up fields from doc_values, - * stored fields, or _source. + * Constructor for backwards compatibility. Use the one with explicit shardId argument. */ + @Deprecated public SearchLookup( MapperService mapperService, BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup + ) { + this(mapperService, fieldDataLookup, UNKNOWN_SHARD_ID); + } + + /** + * Create the top level field lookup for a search request. 
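The UNKNOWN_SHARD_ID sentinel added to SearchLookup pairs a deprecated shard-agnostic constructor with a fail-fast accessor, so an unknown shard id surfaces as an explicit error rather than a silent -1. Condensed into a standalone sketch (illustrative class name, same behavior as the diff):

```java
public class ShardAwareLookupSketch {

    /** Sentinel for callers (mostly tests) that do not know the shard id. */
    public static final int UNKNOWN_SHARD_ID = -1;

    private final int shardId;

    public ShardAwareLookupSketch(int shardId) {
        this.shardId = shardId;
    }

    /** Fail fast instead of letting the sentinel leak into consumers such as scripts. */
    public int shardId() {
        if (shardId == UNKNOWN_SHARD_ID) {
            throw new IllegalStateException("Shard id is unknown for this lookup");
        }
        return shardId;
    }

    public static void main(String[] args) {
        System.out.println(new ShardAwareLookupSketch(3).shardId()); // 3
        try {
            new ShardAwareLookupSketch(UNKNOWN_SHARD_ID).shardId();
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // Shard id is unknown for this lookup
        }
    }
}
```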
Provides a way to look up fields from doc_values, + * stored fields, or _source. + */ + public SearchLookup( + MapperService mapperService, + BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup, + int shardId ) { this.fieldChain = Collections.emptySet(); docMap = new DocLookup( @@ -91,6 +110,7 @@ public SearchLookup( sourceLookup = new SourceLookup(); fieldsLookup = new FieldsLookup(mapperService); this.fieldDataLookup = fieldDataLookup; + this.shardId = shardId; } /** @@ -109,6 +129,7 @@ private SearchLookup(SearchLookup searchLookup, Set<String> fieldChain) { this.sourceLookup = searchLookup.sourceLookup; this.fieldsLookup = searchLookup.fieldsLookup; this.fieldDataLookup = searchLookup.fieldDataLookup; + this.shardId = searchLookup.shardId; } /** @@ -143,4 +164,11 @@ public DocLookup doc() { public SourceLookup source() { return sourceLookup; } + + public int shardId() { + if (shardId == UNKNOWN_SHARD_ID) { + throw new IllegalStateException("Shard id is unknown for this lookup"); + } + return shardId; + } } diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java index 631ace41090d7..19a59e9f7bebe 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.Query; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.AggregationProcessor; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; @@ -33,9 +32,7 @@ public class QueryPhaseSearcherWrapper implements QueryPhaseSearcher { public QueryPhaseSearcherWrapper() { this.defaultQueryPhaseSearcher = new QueryPhase.DefaultQueryPhaseSearcher(); - this.concurrentQueryPhaseSearcher = FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - ? 
new ConcurrentQueryPhaseSearcher() - : null; + this.concurrentQueryPhaseSearcher = new ConcurrentQueryPhaseSearcher(); } /** @@ -58,10 +55,8 @@ public boolean searchWith( boolean hasTimeout ) throws IOException { if (searchContext.shouldUseConcurrentSearch()) { - LOGGER.debug("Using concurrent search over segments (experimental) for request with context id {}", searchContext.id()); return concurrentQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } else { - LOGGER.debug("Using non-concurrent search over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } } @@ -74,13 +69,8 @@ public boolean searchWith( @Override public AggregationProcessor aggregationProcessor(SearchContext searchContext) { if (searchContext.shouldUseConcurrentSearch()) { - LOGGER.debug( - "Using concurrent aggregation processor over segments (experimental) for request with context id {}", - searchContext.id() - ); return concurrentQueryPhaseSearcher.aggregationProcessor(searchContext); } else { - LOGGER.debug("Using non-concurrent aggregation processor over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.aggregationProcessor(searchContext); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 9d2c7eb882fa1..bf2c7fc74be92 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -121,6 +121,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.common.util.set.Sets.newHashSet; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; @@ -226,6 +227,16 @@ public RestoreService( */ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionListener<RestoreCompletionResponse> listener) { try { + // Setting INDEX_STORE_TYPE_SETTING as REMOTE_SNAPSHOT is intended to be a system-managed index setting that is configured when + // restoring a snapshot and should not be manually set by user. + String storeTypeSetting = request.indexSettings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new SnapshotRestoreException( + request.repository(), + request.snapshot(), + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." 
+ ); + } // Read snapshot info and metadata from the repository final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index 24dcab98c8870..4b8897a318531 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -66,7 +66,6 @@ public class TelemetrySettings { private volatile boolean tracingEnabled; private volatile double samplingProbability; - private final boolean tracingFeatureEnabled; private final boolean metricsFeatureEnabled; @@ -98,6 +97,7 @@ public void setSamplingProbability(double samplingProbability) { /** * Get sampling ratio + * @return double */ public double getSamplingProbability() { return samplingProbability; @@ -110,4 +110,5 @@ public boolean isTracingFeatureEnabled() { public boolean isMetricsFeatureEnabled() { return metricsFeatureEnabled; } + } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java index b6b2cf360d1c5..212ef3c713d8e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -40,6 +40,11 @@ private AttributeNames() { */ public static final String HTTP_URI = "http.uri"; + /** + * Http Request Query Parameters. + */ + public static final String HTTP_REQ_QUERY_PARAMS = "url.query"; + /** * Rest Request ID. */ @@ -70,6 +75,16 @@ private AttributeNames() { */ public static final String TRANSPORT_ACTION = "action"; + /** + * Task id + */ + public static final String TASK_ID = "task_id"; + + /** + * Parent task id + */ + public static final String PARENT_TASK_ID = "parent_task_id"; + /** * Index Name */ @@ -94,4 +109,9 @@ private AttributeNames() { * Refresh Policy */ public static final String REFRESH_POLICY = "refresh_policy"; + + /** + * Search Response Total Hits + */ + public static final String TOTAL_HITS = "total_hits"; } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index 1dce422943b7a..42e64109b72fd 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -11,9 +11,11 @@ import org.opensearch.action.bulk.BulkShardRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; +import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.transport.TcpChannel; import org.opensearch.transport.Transport; @@ -75,7 +77,9 @@ public static SpanCreationContext from(String spanName, String nodeId, Replicate } private static String createSpanName(HttpRequest httpRequest) { - return httpRequest.method().name() + SEPARATOR + httpRequest.uri(); + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String path = uriParts.v1(); + return httpRequest.method().name() + SEPARATOR + path; } private static Attributes 
buildSpanAttributes(HttpRequest httpRequest) { @@ -84,9 +88,26 @@ private static Attributes buildSpanAttributes(HttpRequest httpRequest) { .addAttribute(AttributeNames.HTTP_METHOD, httpRequest.method().name()) .addAttribute(AttributeNames.HTTP_PROTOCOL_VERSION, httpRequest.protocolVersion().name()); populateHeader(httpRequest, attributes); + + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + return attributes; } + private static Tuple<String, String> splitUri(String uri) { + int index = uri.indexOf('?'); + if (index >= 0 && index < uri.length() - 1) { + String path = uri.substring(0, index); + String query = uri.substring(index + 1); + return new Tuple<>(path, query); + } + return new Tuple<>(uri, ""); + } + private static void populateHeader(HttpRequest httpRequest, Attributes attributes) { HEADERS_TO_BE_ADDED_AS_ATTRIBUTES.forEach(x -> { if (httpRequest.getHeaders() != null @@ -102,9 +123,8 @@ private static String createSpanName(RestRequest restRequest) { if (restRequest != null) { try { String methodName = restRequest.method().name(); - // path() does the decoding, which may give error - String path = restRequest.path(); - spanName = methodName + SEPARATOR + path; + String rawPath = restRequest.rawPath(); + spanName = methodName + SEPARATOR + rawPath; } catch (Exception e) { // swallow the exception and keep the default name. } @@ -114,9 +134,16 @@ private static String createSpanName(RestRequest restRequest) { private static Attributes buildSpanAttributes(RestRequest restRequest) { if (restRequest != null) { - return Attributes.create() + Attributes attributes = Attributes.create() .addAttribute(AttributeNames.REST_REQ_ID, restRequest.getRequestId()) .addAttribute(AttributeNames.REST_REQ_RAW_PATH, restRequest.rawPath()); + + Tuple<String, String> uriParts = splitUri(restRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + return attributes; } else { return Attributes.EMPTY; } @@ -170,4 +197,43 @@ private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequ return attributes; } + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param spanName name of span. + * @param parentSpan target parent span. + * @return context + */ + public static SpanCreationContext from(String spanName, SpanContext parentSpan) { + return SpanCreationContext.server().name(spanName).parent(parentSpan); + } + + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param task search task. + * @param actionName action. 
+ * @return context + */ + public static SpanCreationContext from(Task task, String actionName) { + return SpanCreationContext.server().name(createSpanName(task, actionName)).attributes(buildSpanAttributes(task, actionName)); + } + + private static Attributes buildSpanAttributes(Task task, String actionName) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, actionName); + if (task != null) { + attributes.addAttribute(AttributeNames.TASK_ID, task.getId()); + if (task.getParentTaskId() != null && task.getParentTaskId().isSet()) { + attributes.addAttribute(AttributeNames.PARENT_TASK_ID, task.getParentTaskId().getId()); + } + } + return attributes; + + } + + private static String createSpanName(Task task, String actionName) { + if (task != null) { + return task.getType() + SEPARATOR + task.getAction(); + } else { + return actionName; + } + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java new file mode 100644 index 0000000000000..71fb59194c447 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.telemetry.tracing.AttributeNames; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.Tracer; + +/** + * SearchRequestOperationsListener subscriber for search request tracing + * + * @opensearch.internal + */ +public final class TraceableSearchRequestOperationsListener extends SearchRequestOperationsListener { + private final Tracer tracer; + private final Span requestSpan; + private SpanContext phaseSpanContext; + + public TraceableSearchRequestOperationsListener(final Tracer tracer, final Span requestSpan) { + this.tracer = tracer; + this.requestSpan = requestSpan; + this.phaseSpanContext = null; + } + + public static SearchRequestOperationsListener create(final Tracer tracer, final Span requestSpan) { + if (tracer.isRecording()) { + return new TraceableSearchRequestOperationsListener(tracer, requestSpan); + } else { + return SearchRequestOperationsListener.NOOP; + } + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assert phaseSpanContext == null : "There should be only one search phase active at a time"; + phaseSpanContext = tracer.getCurrentSpan(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assert phaseSpanContext != null : "There should be a search phase active at this time"; + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assert phaseSpanContext != null : "There should be a search phase active at this time"; + phaseSpanContext.setError((Exception) cause); + phaseSpanContext.endSpan(); + phaseSpanContext = null; 
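+ // phaseSpanContext is cleared so a subsequent onPhaseStart can record a fresh
+ // phase span context without tripping the single-active-phase assertion above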
+ } + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + // add response-related attributes on request end + requestSpan.addAttribute( + AttributeNames.TOTAL_HITS, + searchRequestContext.totalHits() == null ? 0 : searchRequestContext.totalHits().value + ); + } +} diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 12052598d3671..0b9026b81eb4e 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -42,7 +42,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; @@ -187,9 +186,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); - } + map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -282,12 +279,16 @@ public ThreadPool( TimeValue.timeValueMinutes(5) ) ); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - builders.put( + builders.put( + Names.INDEX_SEARCHER, + new ResizableExecutorBuilder( + settings, Names.INDEX_SEARCHER, - new ResizableExecutorBuilder(settings, Names.INDEX_SEARCHER, allocatedProcessors, 1000, runnableTaskListener) - ); - } + twiceAllocatedProcessors(allocatedProcessors), + 1000, + runnableTaskListener + ) + ); for (final ExecutorBuilder<?> builder : customBuilders) { if (builders.containsKey(builder.name())) { diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index d50266d8c9e4a..652d57f4c5348 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -1105,7 +1105,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost "cluster:admin", "cluster:monitor", "cluster:internal", - "internal:" + "internal:", + "views:" ) ) ); diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 2c92f0ecd3f51..80b1d25064885 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -1 +1,2 @@ org.apache.lucene.search.suggest.document.Completion50PostingsFormat +org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 77cd0ab05278e..e1226345ef961 100644 --- 
a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -187,5 +187,4 @@ grant { permission java.io.FilePermission "/sys/fs/cgroup/cpuacct/-", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory/-", "read"; - }; diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 2f47bfb4df70a..d7026159d9ec0 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -40,6 +40,8 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.TimestampParsingException; +import org.opensearch.action.admin.indices.view.ViewAlreadyExistsException; +import org.opensearch.action.admin.indices.view.ViewNotFoundException; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; @@ -892,6 +894,8 @@ public void testIds() { ids.put(169, NodeWeighedAwayException.class); ids.put(170, SearchPipelineProcessingException.class); ids.put(171, CryptoRegistryException.class); + ids.put(172, ViewNotFoundException.class); + ids.put(173, ViewAlreadyExistsException.class); ids.put(10001, IndexCreateBlockException.class); Map<Class<? extends OpenSearchException>, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java index 412b546e134b7..d0a75b007a218 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java @@ -86,15 +86,18 @@ public void testRemoveSingleMetric() throws Exception { } /** - * Test that a newly constructed NodesInfoRequestObject requests all of the - * possible metrics defined in {@link NodesInfoRequest.Metric}. + * Test that a newly constructed NodesInfoRequest object does not request all of the + * possible metrics defined in {@link NodesInfoRequest.Metric}, but only the default metrics + * according to {@link NodesInfoRequest.Metric#defaultMetrics()}. 
*/ public void testNodesInfoRequestDefaults() { - NodesInfoRequest defaultNodesInfoRequest = new NodesInfoRequest(randomAlphaOfLength(8)); - NodesInfoRequest allMetricsNodesInfoRequest = new NodesInfoRequest(randomAlphaOfLength(8)); - allMetricsNodesInfoRequest.all(); + NodesInfoRequest requestOOTB = new NodesInfoRequest(randomAlphaOfLength(8)); + NodesInfoRequest requestAll = new NodesInfoRequest(randomAlphaOfLength(8)).all(); + NodesInfoRequest requestDefault = new NodesInfoRequest(randomAlphaOfLength(8)).defaultMetrics(); - assertThat(defaultNodesInfoRequest.requestedMetrics(), equalTo(allMetricsNodesInfoRequest.requestedMetrics())); + assertTrue(requestAll.requestedMetrics().size() > requestOOTB.requestedMetrics().size()); + assertTrue(requestDefault.requestedMetrics().size() == requestOOTB.requestedMetrics().size()); + assertThat(requestOOTB.requestedMetrics(), equalTo(requestDefault.requestedMetrics())); } /** @@ -107,6 +110,21 @@ public void testNodesInfoRequestAll() throws Exception { assertThat(request.requestedMetrics(), equalTo(NodesInfoRequest.Metric.allMetrics())); } + /** + * Test that the {@link NodesInfoRequest#defaultMetrics()} method enables default metrics. + */ + public void testNodesInfoRequestDefault() { + NodesInfoRequest request = new NodesInfoRequest("node"); + request.defaultMetrics(); + + assertEquals(11, request.requestedMetrics().size()); + assertThat(request.requestedMetrics(), equalTo(NodesInfoRequest.Metric.defaultMetrics())); + assertTrue(request.requestedMetrics().contains(NodesInfoRequest.Metric.JVM.metricName())); + assertTrue(request.requestedMetrics().contains(NodesInfoRequest.Metric.AGGREGATIONS.metricName())); + // search_pipelines metrics are not included + assertFalse(request.requestedMetrics().contains(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName())); + } + /** * Test that the {@link NodesInfoRequest#clear()} method disables all metrics. 
*/ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index a5ca08f141560..1b8b6243aa805 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -65,6 +65,7 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.IoUsageStats; import org.opensearch.node.NodeResourceUsageStats; import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; @@ -881,7 +882,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) throws IOExcep nodeId, System.currentTimeMillis(), randomDoubleBetween(1.0, 100.0, true), - randomDoubleBetween(1.0, 100.0, true) + randomDoubleBetween(1.0, 100.0, true), + new IoUsageStats(100.0) ); resourceUsageStatsMap.put(nodeId, stats); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index d94bb9a7aa88e..6c4337d267c8d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -13,6 +13,8 @@ import org.apache.lucene.util.Constants; import org.opensearch.ExceptionsHelper; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; @@ -563,8 +565,57 @@ public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException assertNotNull(taskInfo.getResourceStats()); assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); - assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total") instanceof TaskResourceUsage); - TaskResourceUsage taskResourceUsage = (TaskResourceUsage) taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); + assertTrue(taskResourceUsage.getMemoryInBytes() > 0); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Wait for the whole request to complete and return successfully to the client + taskTestContext.requestCompleteLatch.await(); + + 
assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileGetTask() throws InterruptedException { + setup(true, false); + + final AtomicReference<Throwable> throwableReference = new AtomicReference<>(); + final AtomicReference<NodesResponse> responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + assertFalse(resourceTasks.isEmpty()); + GetTaskResponse getTaskResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportGetTaskAction, + new GetTaskRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), new ArrayList<>(resourceTasks.values()).get(0).getId())) + ); + + TaskInfo taskInfo = getTaskResponse.getTask().getTask(); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); assertTrue(taskResourceUsage.getMemoryInBytes() > 0); }; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index a3fa0f9cb16e4..8d87fd5135663 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.FailedNodeException; import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; +import org.opensearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.BaseNodeResponse; @@ -41,6 +42,7 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.client.Client; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; @@ -57,6 +59,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; @@ -85,6 +88,7 @@ import static java.util.Collections.emptySet; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.mockito.Mockito.mock; /** * The test case for unit testing task manager and related transport actions @@ -249,6 +253,17 @@ protected TaskManager createTaskManager( taskResourceTrackingService ); transportCancelTasksAction = new 
TransportCancelTasksAction(clusterService, transportService, actionFilters); + Client mockClient = mock(Client.class); + NamedXContentRegistry namedXContentRegistry = mock(NamedXContentRegistry.class); + transportGetTaskAction = new TransportGetTaskAction( + threadPool, + transportService, + actionFilters, + clusterService, + mockClient, + namedXContentRegistry, + taskResourceTrackingService + ); transportService.acceptIncomingRequests(); } @@ -258,6 +273,7 @@ protected TaskManager createTaskManager( private final SetOnce<DiscoveryNode> discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; + public final TransportGetTaskAction transportGetTaskAction; @Override public void close() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java index 398d9e3338580..89e072d783747 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -50,6 +50,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class CreateIndexRequestTests extends OpenSearchTestCase { @@ -150,6 +151,20 @@ public void testSettingsType() throws IOException { assertThat(e.getMessage(), equalTo("key [settings] must be an object")); } + public void testToString() throws IOException { + CreateIndexRequest request = new CreateIndexRequest("foo"); + String mapping = JsonXContent.contentBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .endObject() + .endObject() + .toString(); + request.mapping(mapping); + + assertThat(request.toString(), containsString("index='foo'")); + assertThat(request.toString(), containsString("mappings='{\"_doc\":{}}'")); + } + public static void assertMappingsEqual(Map<String, String> expected, Map<String, String> actual) throws IOException { assertEquals(expected.keySet(), actual.keySet()); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java index 87ba6110447c1..a80141c52b6b4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -31,21 +31,137 @@ package org.opensearch.action.admin.indices.forcemerge; +import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; public class ForceMergeRequestTests extends OpenSearchTestCase { public void testDescription() { ForceMergeRequest request = new ForceMergeRequest(); - assertEquals("Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest("shop", "blog"); - assertEquals("Force-merge indices [shop, blog], 
maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest(); request.maxNumSegments(12); request.onlyExpungeDeletes(true); request.flush(false); - assertEquals("Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false]", request.getDescription()); + request.primaryOnly(true); + assertEquals( + "Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false], primaryOnly[true]", + request.getDescription() + ); + } + + public void testToString() { + ForceMergeRequest request = new ForceMergeRequest(); + assertEquals("ForceMergeRequest{maxNumSegments=-1, onlyExpungeDeletes=false, flush=true, primaryOnly=false}", request.toString()); + + request = new ForceMergeRequest(); + request.maxNumSegments(12); + request.onlyExpungeDeletes(true); + request.flush(false); + request.primaryOnly(true); + assertEquals("ForceMergeRequest{maxNumSegments=12, onlyExpungeDeletes=true, flush=false, primaryOnly=true}", request.toString()); + } + + public void testSerialization() throws Exception { + final ForceMergeRequest request = randomRequest(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + deserializedRequest = new ForceMergeRequest(in); + } + assertEquals(request.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(request.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(request.flush(), deserializedRequest.flush()); + assertEquals(request.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(request.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + + public void testBwcSerialization() throws Exception { + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(compatibleVersion); + sample.writeTo(out); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(Version.CURRENT); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + } + + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + sample.getParentTask().writeTo(out); + out.writeStringArray(sample.indices()); + sample.indicesOptions().writeIndicesOptions(out); + out.writeInt(sample.maxNumSegments()); + out.writeBoolean(sample.onlyExpungeDeletes()); + out.writeBoolean(sample.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(sample.primaryOnly()); + } + if 
(compatibleVersion.onOrAfter(Version.V_3_0_0)) { + out.writeString(sample.forceMergeUUID()); + } else { + out.writeOptionalString(sample.forceMergeUUID()); + } + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(compatibleVersion); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + } + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + + } + } + } + + private ForceMergeRequest randomRequest() { + ForceMergeRequest request = new ForceMergeRequest(); + if (randomBoolean()) { + request.maxNumSegments(randomIntBetween(1, 10)); + } + request.onlyExpungeDeletes(true); + request.flush(randomBoolean()); + request.primaryOnly(randomBoolean()); + return request; } } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java new file mode 100644 index 0000000000000..e2211bb120366 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.util.List; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.nullValue; + +public class CreateViewRequestTests extends AbstractWireSerializingTestCase<CreateViewAction.Request> { + + @Override + protected Writeable.Reader<CreateViewAction.Request> instanceReader() { + return CreateViewAction.Request::new; + } + + @Override + protected CreateViewAction.Request createTestInstance() { + return new CreateViewAction.Request( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomList(5, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(8))) + ); + } + + public void testValidateRequest() { + final CreateViewAction.Request request = new CreateViewAction.Request( + "my-view", + "this is a description", + List.of(new CreateViewAction.Request.Target("my-indices-*")) + ); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final CreateViewAction.Request request = new CreateViewAction.Request("", null, null); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null", "targets cannot be empty")); + } + + public void testSizeThresholds() { + final String validName = randomAlphaOfLength(8); + final String validDescription = randomAlphaOfLength(20); + final int validTargetLength = randomIntBetween(1, 5); + final String validIndexPattern = randomAlphaOfLength(8); + + final CreateViewAction.Request requestNameTooBig = new 
CreateViewAction.Request( + randomAlphaOfLength(65), + validDescription, + randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat( + requestNameTooBig.validate().validationErrors(), + contains("name must be less than 64 characters in length") + ); + + final CreateViewAction.Request requestDescriptionTooBig = new CreateViewAction.Request( + validName, + randomAlphaOfLength(257), + randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat( + requestDescriptionTooBig.validate().validationErrors(), + contains("description must be less than 256 characters in length") + ); + + final CreateViewAction.Request requestTargetsSize = new CreateViewAction.Request( + validName, + validDescription, + randomList(26, 26, () -> new CreateViewAction.Request.Target(validIndexPattern)) + ); + MatcherAssert.assertThat(requestTargetsSize.validate().validationErrors(), contains("view cannot have more than 25 targets")); + + final CreateViewAction.Request requestTargetsIndexPatternSize = new CreateViewAction.Request( + validName, + validDescription, + randomList(1, 1, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(65))) + ); + MatcherAssert.assertThat( + requestTargetsIndexPatternSize.validate().validationErrors(), + contains("target index pattern must be less than 64 characters in length") + ); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java new file mode 100644 index 0000000000000..29305e3dfb92f --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.nullValue; + +public class DeleteViewRequestTests extends AbstractWireSerializingTestCase<DeleteViewAction.Request> { + + @Override + protected Writeable.Reader<DeleteViewAction.Request> instanceReader() { + return DeleteViewAction.Request::new; + } + + @Override + protected DeleteViewAction.Request createTestInstance() { + return new DeleteViewAction.Request(randomAlphaOfLength(8)); + } + + public void testValidateRequest() { + final DeleteViewAction.Request request = new DeleteViewAction.Request("my-view"); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final DeleteViewAction.Request request = new DeleteViewAction.Request(""); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null")); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java new file mode 100644 index 0000000000000..44dfbe5f1d781 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.cluster.metadata.View; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; + +import java.util.TreeSet; + +public class GetViewResponseTests extends AbstractWireSerializingTestCase<GetViewAction.Response> { + + @Override + protected Writeable.Reader<GetViewAction.Response> instanceReader() { + return GetViewAction.Response::new; + } + + @Override + protected GetViewAction.Response createTestInstance() { + return new GetViewAction.Response( + new View( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomLong(), + randomLong(), + new TreeSet<>(randomList(5, () -> new View.Target(randomAlphaOfLength(8)))) + ) + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java new file mode 100644 index 0000000000000..80a2827d158bb --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import static org.hamcrest.Matchers.nullValue; + +public class ListViewNamesRequestTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Request> { + + @Override + protected Writeable.Reader<ListViewNamesAction.Request> instanceReader() { + return ListViewNamesAction.Request::new; + } + + @Override + protected ListViewNamesAction.Request createTestInstance() { + return new ListViewNamesAction.Request(); + } + + public void testValidateRequest() { + final ListViewNamesAction.Request request = new ListViewNamesAction.Request(); + + MatcherAssert.assertThat(request.validate(), nullValue()); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java new file mode 100644 index 0000000000000..ee8409fe3c805 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; + +public class ListViewNamesResponseTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Response> { + + @Override + protected Writeable.Reader<ListViewNamesAction.Response> instanceReader() { + return ListViewNamesAction.Response::new; + } + + @Override + protected ListViewNamesAction.Response createTestInstance() { + return new ListViewNamesAction.Response(randomList(5, () -> randomAlphaOfLength(8))); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java new file mode 100644 index 0000000000000..d49c0c1a8f2bd --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class SearchViewRequestTests extends AbstractWireSerializingTestCase<SearchViewAction.Request> { + + @Override + protected Writeable.Reader<SearchViewAction.Request> instanceReader() { + return SearchViewAction.Request::new; + } + + @Override + protected SearchViewAction.Request createTestInstance() { + try { + return new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest()); + } catch (final Exception e) { + throw new RuntimeException(e); + } + } + + public void testValidateRequest() throws IOException { + final SearchViewAction.Request request = new SearchViewAction.Request("my-view", new SearchRequest()); + MatcherAssert.assertThat(request.validate(), nullValue()); + } + + public void testValidateRequestWithoutName() { + final SearchViewAction.Request request = new SearchViewAction.Request((String) null, new SearchRequest()); + final ActionRequestValidationException e = request.validate(); + + MatcherAssert.assertThat(e.validationErrors().size(), equalTo(1)); + MatcherAssert.assertThat(e.validationErrors().get(0), containsString("View is required")); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java new file mode 100644 index 0000000000000..91813e1336cf2 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +public class ViewServiceTest { + + private final View.Target typicalTarget = new View.Target(randomAlphaOfLength(8)); + private final View typicalView = new View( + "view-" + randomAlphaOfLength(8), + "description " + randomAlphaOfLength(20), + -1L, + -1L, + Set.of(typicalTarget) + ); + + private ClusterService clusterService; + private NodeClient nodeClient; + private final AtomicLong currentTime = new AtomicLong(0); + private LongSupplier timeProvider = currentTime::longValue; + private ViewService viewService; + + @Before + public void before() { + clusterService = mock(ClusterService.class); + nodeClient = mock(NodeClient.class); + timeProvider = mock(LongSupplier.class); + doAnswer(invocation -> currentTime.get()).when(timeProvider).getAsLong(); + viewService = spy(new ViewService(clusterService, nodeClient, timeProvider)); + } + + @After + public void after() { + verifyNoMoreInteractions(timeProvider, clusterService, nodeClient); + } + + private CreateViewAction.Request createTypicalViewRequest() { + return new CreateViewAction.Request( + randomAlphaOfLength(8), + randomAlphaOfLength(20), + List.of(new CreateViewAction.Request.Target(randomAlphaOfLength(8))) + ); + } + + public void createView() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.createView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("create_view_task"), any()); + verify(timeProvider).getAsLong(); + } + + public void updateView() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.updateView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("update_view_task"), any()); + verify(timeProvider).getAsLong(); + } + + public void updateView_doesNotExist() { + final var request = createTypicalViewRequest(); + final var listener = mock(ActionListener.class); + doThrow(new 
ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final Exception ex = assertThrows(ResourceNotFoundException.class, () -> viewService.updateView(request, listener)); + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void deleteView() { + final var request = new DeleteViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.deleteView(request, listener); + + verify(clusterService).submitStateUpdateTask(eq("delete_view_task"), any()); + } + + public void deleteView_doesNotExist() { + final var request = new DeleteViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.deleteView(request, listener)); + + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void getView() { + final var request = new GetViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.getView(request, listener); + + verify(listener).onResponse(any()); + } + + public void getView_doesNotExist() { + final var request = new GetViewAction.Request(randomAlphaOfLength(8)); + final var listener = mock(ActionListener.class); + doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString()); + + final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.getView(request, listener)); + + MatcherAssert.assertThat(ex.getMessage(), equalTo("abc")); + } + + public void listViewNames() { + final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).metadata( + new Metadata.Builder().views(Map.of(typicalView.getName(), typicalView)).build() + ).build(); + final var listener = mock(ActionListener.class); + when(clusterService.state()).thenReturn(clusterState); + + viewService.listViewNames(listener); + + verify(clusterService).state(); + verify(listener).onResponse(any()); + } + + public void listViewNames_noViews() { + final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).build(); + final var listener = mock(ActionListener.class); + when(clusterService.state()).thenReturn(clusterState); + + viewService.listViewNames(listener); + + verify(clusterService).state(); + verify(listener).onResponse(any()); + } + + public void searchView() { + final var request = spy(new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest())); + final var listener = mock(ActionListener.class); + setGetViewOrThrowExceptionToReturnTypicalView(); + + viewService.searchView(request, listener); + + verify(nodeClient).executeLocally(eq(SearchAction.INSTANCE), any(), any(ActionListener.class)); + verify(request).indices(typicalTarget.getIndexPattern()); + } + + private void setGetViewOrThrowExceptionToReturnTypicalView() { + doAnswer(invocation -> typicalView).when(viewService).getViewOrThrowException(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java index f000b7a10a30b..c6e880fbd137e 100644 --- 
a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.get; +import org.opensearch.action.get.MultiGetRequest.Item; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; @@ -141,6 +142,13 @@ public void testXContentSerialization() throws IOException { } } + public void testToString() { + MultiGetRequest req = createTestInstance(); + for (Item items : req.getItems()) { + assertThat(req.toString(), containsString(items.toString())); + } + } + private MultiGetRequest createTestInstance() { int numItems = randomIntBetween(0, 128); MultiGetRequest request = new MultiGetRequest(); diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 76129341fc9a2..420289d3ff2e5 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -58,6 +58,7 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -88,13 +89,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List<Tuple<String, String>> resolvedNodes = new ArrayList<>(); private final Set<ShardSearchContextId> releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + private SearchRequestOperationsListenerAssertingListener assertingListener; ThreadPool threadPool; @Before @@ -103,6 +104,7 @@ public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); threadPool = new TestThreadPool(getClass().getName()); + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After @@ -112,6 +114,7 @@ public void tearDown() throws Exception { executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertingListener.assertFinished(); } private AbstractSearchAsyncAction<SearchPhaseResult> createAction( @@ -127,6 +130,7 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( listener, controlled, false, + false, expected, new SearchShardIterator(null, null, Collections.emptyList(), null) ); @@ -138,6 +142,7 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( ActionListener<SearchResponse> listener, final boolean controlled, final boolean failExecutePhaseOnShard, + final boolean catchExceptionWhenExecutePhaseOnShard, final AtomicLong expected, final SearchShardIterator... 
shards ) { @@ -178,7 +183,11 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( results, request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { @@ -194,7 +203,15 @@ protected void executePhaseOnShard( if (failExecutePhaseOnShard) { listener.onFailure(new ShardNotFoundException(shardIt.shardId())); } else { - listener.onResponse(new QuerySearchResult()); + if (catchExceptionWhenExecutePhaseOnShard) { + try { + listener.onResponse(new QuerySearchResult()); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + listener.onResponse(new QuerySearchResult()); + } } } @@ -334,7 +351,7 @@ public void testOnPhaseFailureAndVerifyListeners() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List<SearchRequestOperationsListener> requestOperationListeners = List.of(testListener, assertingListener); SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); action.start(); assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); @@ -366,6 +383,7 @@ public void run() { SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); searchShardIterator.resetAndSkip(); action.skipShard(searchShardIterator); + action.start(); action.executeNextPhase(action, fetchPhase); assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); action.onPhaseFailure(new SearchPhase("test") { @@ -482,6 +500,7 @@ public void onFailure(Exception e) { }, false, true, + false, new AtomicLong(), shards ); @@ -528,6 +547,7 @@ public void onFailure(Exception e) { }, false, false, + false, new AtomicLong(), shards ); @@ -543,7 +563,7 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } - public void testExecutePhaseOnShardFailure() throws InterruptedException { + private void innerTestExecutePhaseOnShardFailure(boolean catchExceptionWhenExecutePhaseOnShard) throws InterruptedException { final Index index = new Index("test", UUID.randomUUID().toString()); final SearchShardIterator[] shards = IntStream.range(0, 2 + randomInt(3)) @@ -579,6 +599,7 @@ public void onFailure(Exception e) { }, false, false, + catchExceptionWhenExecutePhaseOnShard, new AtomicLong(), shards ); @@ -594,10 +615,18 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testExecutePhaseOnShardFailure() throws InterruptedException { + innerTestExecutePhaseOnShardFailure(false); + } + + public void testExecutePhaseOnShardFailureAndThrowException() throws InterruptedException { + innerTestExecutePhaseOnShardFailure(true); + } + public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { ClusterSettings clusterSettings = 
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); long delay = (randomIntBetween(1, 5)); delay = delay * 10; @@ -637,8 +666,8 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); action.executeNextPhase(expandPhase, fetchPhase); + action.onPhaseDone(); /* finish the phase since we don't have a response being sent */ - action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); @@ -647,7 +676,7 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx public void testOnPhaseListenersWithDfsType() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); SearchRequestStats testListener = new SearchRequestStats(clusterSettings); - final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener)); + final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( requestOperationListeners @@ -664,6 +693,11 @@ public void testOnPhaseListenersWithDfsType() throws InterruptedException { searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure( + fetchPhase, + "Something went wrong", + null + ); /* finalizing the fetch phase since we make ad hoc phase lifecycle calls */ assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); @@ -706,7 +740,7 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct null, null, null, - null, + controller, executor, resultConsumer, searchRequest, @@ -719,7 +753,8 @@ private SearchDfsQueryThenFetchAsyncAct new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ); } @@ -772,7 +807,8 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( new SearchRequestContext( new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override ShardSearchFailure[] buildShardFailures() { diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java 
index 56dcf66d5607d..1881c705fe6b3 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; @@ -41,18 +42,25 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -64,14 +72,33 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.stream.IntStream; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; public class CanMatchPreFilterSearchPhaseTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } public void testFilterShards() throws InterruptedException { @@ -116,6 +143,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -134,14 +165,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new 
SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -211,6 +241,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -229,14 +263,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -296,6 +329,10 @@ public void sendCanMatch( (e) -> { throw new AssertionError("unexpected", e); } ); Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -310,62 +347,62 @@ public void sendCanMatch( timeProvider, ClusterState.EMPTY_STATE, null, - (iter) -> new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - Collections.emptyMap(), - executor, - searchRequest, - responseListener, - iter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(iter.size()), - randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) - ) { - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { - return new SearchPhase("test") { + (iter) -> { + return new WrappingSearchAsyncActionPhase( + new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + responseListener, + iter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + new ArraySearchPhaseResults<>(iter.size()), + randomIntBetween(1, 32), + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE + ) { @Override - public void run() { - latch.countDown(); + protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { + return new WrappingSearchAsyncActionPhase(this) { + @Override + public void run() { + latch.countDown(); + } + 
}; } - }; - } - @Override - protected void executePhaseOnShard( - final SearchShardIterator shardIt, - final SearchShardTarget shard, - final SearchActionListener<SearchPhaseResult> listener - ) { - if (randomBoolean()) { - listener.onResponse(new SearchPhaseResult() { - }); - } else { - listener.onFailure(new Exception("failure")); + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener<SearchPhaseResult> listener + ) { + if (randomBoolean()) { + listener.onResponse(new SearchPhaseResult() { + }); + } else { + listener.onFailure(new Exception("failure")); + } + } } - } + ); }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + executor.shutdown(); } @@ -424,6 +461,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -442,18 +483,18 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + ShardId[] expected = IntStream.range(0, shardIds.size()) .boxed() .sorted(Comparator.comparing(minAndMaxes::get, MinAndMax.getComparator(order)).thenComparing(shardIds::get)) @@ -527,6 +568,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -545,18 +590,18 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), - searchRequest - ) + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + int shardId = 0; for (SearchShardIterator i : result.get()) { assertThat(i.shardId().id(), equalTo(shardId++)); @@ -565,4 +610,192 @@ public void run() { assertThat(result.get().size(), equalTo(numShards)); } } + + public void testAsyncAction() throws InterruptedException { + + final 
TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + + Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>(); + DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + lookup.put("node_1", new SearchAsyncActionTests.MockConnection(primaryNode)); + lookup.put("node_2", new SearchAsyncActionTests.MockConnection(replicaNode)); + final boolean shard1 = randomBoolean(); + final boolean shard2 = randomBoolean(); + + SearchTransportService searchTransportService = new SearchTransportService(null, null) { + @Override + public void sendCanMatch( + Transport.Connection connection, + ShardSearchRequest request, + SearchTask task, + ActionListener<SearchService.CanMatchResponse> listener + ) { + new Thread( + () -> listener.onResponse(new SearchService.CanMatchResponse(request.shardId().id() == 0 ? shard1 : shard2, null)) + ).start(); + } + }; + + AtomicReference<GroupShardsIterator<SearchShardIterator>> result = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter( + "idx", + new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS), + 2, + randomBoolean(), + primaryNode, + replicaNode + ); + final SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(true); + + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + ExecutorService executor = OpenSearchExecutors.newDirectExecutorService(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + searchRequest + ); + + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + null, + (iter) -> { + AbstractSearchAsyncAction<? 
extends SearchPhaseResult> action = new SearchDfsQueryAsyncAction( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + controller, + executor, + resultConsumer, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + task, + SearchResponse.Clusters.EMPTY, + searchRequestContext + ); + return new WrappingSearchAsyncActionPhase(action) { + @Override + public void run() { + super.run(); + latch.countDown(); + } + }; + }, + SearchResponse.Clusters.EMPTY, + searchRequestContext, + NoopTracer.INSTANCE + ); + + canMatchPhase.start(); + latch.await(); + + assertThat(result.get(), is(nullValue())); + } + + private static final class SearchDfsQueryAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> { + private final SearchRequestOperationsListener listener; + + SearchDfsQueryAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final BiFunction<String, String, Transport.Connection> nodeIdToConnection, + final Map<String, AliasFilter> aliasFilter, + final Map<String, Float> concreteIndexBoosts, + final Map<String, Set<String>> indexRoutings, + final SearchPhaseController searchPhaseController, + final Executor executor, + final QueryPhaseResultConsumer queryPhaseResultConsumer, + final SearchRequest request, + final ActionListener<SearchResponse> listener, + final GroupShardsIterator<SearchShardIterator> shardsIts, + final TransportSearchAction.SearchTimeProvider timeProvider, + final ClusterState clusterState, + final SearchTask task, + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext + ) { + super( + SearchPhaseName.DFS_PRE_QUERY.getName(), + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + request.getMaxConcurrentShardRequests(), + clusters, + searchRequestContext, + NoopTracer.INSTANCE + ); + this.listener = searchRequestContext.getSearchRequestOperationsListener(); + } + + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener<DfsSearchResult> listener + ) { + final DfsSearchResult response = new DfsSearchResult(shardIt.getSearchContextId(), shard, null); + response.setShardIndex(shard.getShardId().getId()); + listener.innerOnResponse(response); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) { + return new SearchPhase("last") { + @Override + public void run() throws IOException { + listener.onPhaseEnd(context, null); + } + }; + } + } + } diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java index 04a00a09dcbc4..cc10da8fc1f12 100644 --- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java @@ -67,17 +67,27 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { final Set<ShardSearchContextId> releasedSearchContexts = new HashSet<>(); final SearchRequest searchRequest; final AtomicReference<SearchResponse> searchResponse 
= new AtomicReference<>(); + final SearchPhase currentPhase; public MockSearchPhaseContext(int numShards) { this(numShards, new SearchRequest()); } public MockSearchPhaseContext(int numShards, SearchRequest searchRequest) { + this(numShards, searchRequest, null); + } + + public MockSearchPhaseContext(int numShards, SearchRequest searchRequest, SearchPhase currentPhase) { this.numShards = numShards; this.searchRequest = searchRequest; + this.currentPhase = currentPhase; numSuccess = new AtomicInteger(numShards); } + public MockSearchPhaseContext(int numShards, SearchPhase currentPhase) { + this(numShards, new SearchRequest(), currentPhase); + } + public void assertNoFailure() { if (phaseFailure.get() != null) { throw new AssertionError(phaseFailure.get()); @@ -106,7 +116,7 @@ public SearchRequest getRequest() { @Override public SearchPhase getCurrentPhase() { - return null; + return currentPhase; } @Override diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java index 908c122edc455..2577dfdc20698 100644 --- a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java @@ -73,6 +73,7 @@ import static java.util.Collections.singletonList; import static org.opensearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -560,6 +561,13 @@ public void testEqualsAndHashcode() { checkEqualsAndHashCode(createMultiSearchRequest(), MultiSearchRequestTests::copyRequest, MultiSearchRequestTests::mutate); } + public void testToString() { + MultiSearchRequest req = createMultiSearchRequest(); + for (SearchRequest subReq : req.requests()) { + assertThat(req.toString(), containsString(subReq.toString())); + } + } + private static MultiSearchRequest mutate(MultiSearchRequest searchRequest) throws IOException { MultiSearchRequest mutation = copyRequest(searchRequest); List<CheckedRunnable<IOException>> mutators = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index af7adc4e58fb8..35e90ff662b19 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -51,11 +51,14 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -79,6 +82,23 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SearchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + 
@Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } public void testSkipSearchShards() throws InterruptedException { SearchRequest request = new SearchRequest(); @@ -117,6 +137,10 @@ public void testSkipSearchShards() throws InterruptedException { lookup.put(replicaNode.getId(), new MockConnection(replicaNode)); Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>( "test", logger, @@ -138,7 +162,8 @@ public void testSkipSearchShards() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -169,6 +194,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -236,6 +262,10 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>( "test", logger, @@ -257,7 +287,8 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override @@ -295,6 +326,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res return new SearchPhase("test") { @Override public void run() { + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); assertTrue(searchPhaseDidRun.compareAndSet(false, true)); } }; @@ -375,7 +407,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + 
new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -411,6 +447,7 @@ public void run() { sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); } responseListener.onResponse(response); + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } @@ -498,7 +535,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { TestSearchResponse response = new TestSearchResponse(); @@ -591,6 +632,10 @@ public void testAllowPartialResults() throws InterruptedException { Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>( "test", logger, @@ -612,7 +657,8 @@ public void testAllowPartialResults() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) + new SearchRequestContext(searchRequestOperationsListener, request), + NoopTracer.INSTANCE ) { @Override protected void executePhaseOnShard( @@ -645,6 +691,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res @Override public void run() { assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null); } }; } diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java index 17fa124890158..4878a463729f9 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java @@ -97,8 +97,8 @@ public void testBoolQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); - verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("bool")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class)); } public void testFunctionScoreQuery() { @@ -108,7 +108,7 @@ public void testFunctionScoreQuery() { 
searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("function_score")).add(eq(1.0d), any(Tags.class)); } public void testMatchQuery() { @@ -118,7 +118,7 @@ public void testMatchQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class)); } public void testMatchPhraseQuery() { @@ -128,7 +128,7 @@ public void testMatchPhraseQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match_phrase")).add(eq(1.0d), any(Tags.class)); } public void testMultiMatchQuery() { @@ -138,7 +138,7 @@ public void testMultiMatchQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("multi_match")).add(eq(1.0d), any(Tags.class)); } public void testOtherQuery() { @@ -152,8 +152,9 @@ public void testOtherQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(1)).add(eq(1.0d), any(Tags.class)); - verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("boosting")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match_none")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class)); } public void testQueryStringQuery() { @@ -164,7 +165,7 @@ public void testQueryStringQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.queryStringCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("query_string")).add(eq(1.0d), any(Tags.class)); } public void testRangeQuery() { @@ -176,7 +177,7 @@ public void testRangeQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("range")).add(eq(1.0d), any(Tags.class)); } public void testRegexQuery() { @@ -185,7 +186,7 @@ public void testRegexQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.regexpCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("regexp")).add(eq(1.0d), any(Tags.class)); } public void testSortQuery() { @@ -196,7 +197,7 @@ public void testSortQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), 
any(Tags.class)); verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); } @@ -207,7 +208,7 @@ public void testTermQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class)); } public void testWildcardQuery() { @@ -217,7 +218,7 @@ public void testWildcardQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("wildcard")).add(eq(1.0d), any(Tags.class)); } public void testComplexQuery() { @@ -235,10 +236,10 @@ public void testComplexQuery() { searchQueryCategorizer.categorize(sourceBuilder); - verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); - verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); - verify(searchQueryCategorizer.searchQueryCounters.regexpCounter).add(eq(1.0d), any(Tags.class)); - verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("regexp")).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("bool")).add(eq(1.0d), any(Tags.class)); verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d), any(Tags.class)); } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index faf6f86c69c27..aefbbe80d5fa1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -59,9 +59,12 @@ import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.util.Collections; import java.util.List; @@ -77,6 +80,24 @@ import static org.hamcrest.Matchers.instanceOf; public class SearchQueryThenFetchAsyncActionTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } + public void testBottomFieldSort() throws Exception { testCase(false, false); } @@ -218,15 +239,17 @@ public void sendExecuteQuery( task, SearchResponse.Clusters.EMPTY, new 
SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), searchRequest - ) + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { return new SearchPhase("test") { @Override public void run() { + assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java index 78c5ba4412c68..845543fbd9f57 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java @@ -119,13 +119,13 @@ public void testStandardListenerAndPerRequestListenerDisabled() { public SearchRequestOperationsListener createTestSearchRequestOperationsListener() { return new SearchRequestOperationsListener() { @Override - void onPhaseStart(SearchPhaseContext context) {} + protected void onPhaseStart(SearchPhaseContext context) {} @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} @Override - void onPhaseFailure(SearchPhaseContext context) {} + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} }; } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java new file mode 100644 index 0000000000000..327371ebcaf0b --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +class SearchRequestOperationsListenerAssertingListener extends SearchRequestOperationsListener { + private volatile SearchPhase phase; + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assertThat(phase, is(nullValue())); + phase = context.getCurrentPhase(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assertThat(phase, is(context.getCurrentPhase())); + phase = null; + } + + public void assertFinished() { + assertThat(phase, is(nullValue())); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java index a53c35a8401b3..990ed95f1aebc 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -40,7 +40,7 @@ public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRe } @Override - public void onPhaseFailure(SearchPhaseContext context) { + public void onPhaseFailure(SearchPhaseContext context, Throwable cause) { searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } }; diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java index 377ccebbfd418..fb9b26e3f3ad1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -36,7 +36,7 @@ public void testSearchRequestPhaseFailure() { when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); testRequestStats.onPhaseStart(ctx); assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); - testRequestStats.onPhaseFailure(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); } } @@ -156,7 +156,7 @@ public void testSearchRequestStatsOnPhaseFailureConcurrently() throws Interrupte threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); testRequestStats.onPhaseStart(ctx); - testRequestStats.onPhaseFailure(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); countDownLatch.countDown(); }); threads[i].start(); diff --git a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index 4b0bde0984ad1..e84b5213be39e 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -64,9 +64,8 @@ import org.apache.lucene.store.Directory; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -82,10 +81,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTermVectorsTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractTermVectorsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - public AbstractTermVectorsTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public AbstractTermVectorsTestCase(Settings staticSettings) { + super(staticSettings); } @ParametersFactory @@ -96,11 +95,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - protected static class TestFieldSetting { public final String name; public final boolean storedOffset; diff --git a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java index b1e27ea9c66e3..2b4d2a755f543 100644 --- a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java @@ -49,6 +49,7 @@ public class OpenSearchPolicyTests extends OpenSearchTestCase { /** * test restricting privileges to no permissions actually works */ + @SuppressWarnings("removal") public void testRestrictPrivileges() { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java index ea4ef96ec0f77..76353aea03257 100644 --- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java @@ -35,8 +35,12 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Map; public class SecurityTests extends OpenSearchTestCase { @@ -72,6 +76,7 @@ public void testEnsureRegularFile() throws IOException { } /** can't execute processes */ + @SuppressWarnings("removal") public void testProcessExecution() throws Exception { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { @@ -79,4 +84,23 @@ public void testProcessExecution() throws Exception { fail("didn't get expected exception"); } catch (SecurityException expected) {} } + + public void testReadPolicyWithCodebases() throws IOException { + final Map<String, URL> codebases = Map.of( + "test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar", + new URL("file://test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar"), + "test-kafka-server-common-3.6.1.jar", + new URL("file://test-kafka-server-common-3.6.1.jar"), + "test-kafka-server-common-3.6.1-test.jar", + new URL("file://test-kafka-server-common-3.6.1-test.jar"), + "test-lucene-core-9.11.0-snapshot-8a555eb.jar", + new URL("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar"), + "test-zstd-jni-1.5.5-5.jar", + new URL("file://test-zstd-jni-1.5.5-5.jar") + ); + + 
AccessController.doPrivileged( + (PrivilegedAction<?>) () -> Security.readPolicy(SecurityTests.class.getResource("test-codebases.policy"), codebases) + ); + } } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index 535444cd866b8..b30ebaf183084 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -51,6 +51,7 @@ import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.RemoteStoreMigrationAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ResizeAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.RestoreInProgressAllocationDecider; @@ -67,6 +68,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.plugins.ClusterPlugin; @@ -242,6 +244,9 @@ public void testAllocationDeciderOrder() { NodeLoadAwareAllocationDecider.class, TargetPoolAllocationDecider.class ); + if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING)) { + expectedDeciders.add(RemoteStoreMigrationAllocationDecider.class); + } Collection<AllocationDecider> deciders = ClusterModule.createAllocationDeciders( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5952cc1bcaac2..5eafe63e63fad 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -52,6 +52,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -67,11 +68,14 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -373,7 +377,8 @@ public void testJoinClusterWithNonRemoteStoreNodeJoining() { } public void testJoinClusterWithRemoteStoreNodeJoining() { - DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + Map<String, String> map = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + DiscoveryNode joiningNode = newDiscoveryNode(map); ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(joiningNode).build()) .build(); @@ -393,6 +398,7 @@ public void testJoinClusterWithNonRemoteStoreNodeJoiningNonRemoteStoreCluster() } public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) @@ -406,6 +412,62 @@ public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluste assertTrue(e.getMessage().equals("a remote store node [" + joiningNode + "] is trying to join a non remote " + "store cluster")); } + public void testRemoteStoreNodeJoiningNonRemoteStoreClusterMixedMode() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .metadata(metadata) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testAllTypesNodeJoiningRemoteStoreClusterMixedMode() { + final DiscoveryNode docrepNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = 
Metadata.builder().persistentSettings(settings).build();
+ ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+ .nodes(
+ DiscoveryNodes.builder()
+ .add(docrepNode)
+ .localNodeId(docrepNode.getId())
+ .add(remoteNode)
+ .localNodeId(remoteNode.getId())
+ .build()
+ )
+ .metadata(metadata)
+ .build();
+
+ // compatible remote node should be able to join a mixed-mode cluster that already has a remote node
+ DiscoveryNode goodRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO));
+ JoinTaskExecutor.ensureNodesCompatibility(goodRemoteNode, currentState.getNodes(), currentState.metadata());
+
+ // incompatible node should not be able to join a mixed-mode cluster
+ DiscoveryNode badRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(TRANSLOG_REPO, TRANSLOG_REPO));
+ assertThrows(
+ IllegalStateException.class,
+ () -> JoinTaskExecutor.ensureNodesCompatibility(badRemoteNode, currentState.getNodes(), currentState.metadata())
+ );
+
+ // DocRep node should be able to join a mixed-mode cluster
+ DiscoveryNode docrepNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT);
+ JoinTaskExecutor.ensureNodesCompatibility(docrepNode2, currentState.getNodes(), currentState.metadata());
+ }
+
 public void testJoinClusterWithRemoteStoreNodeJoiningRemoteStoreCluster() {
 final DiscoveryNode existingNode = new DiscoveryNode(
 UUIDs.base64UUID(),
@@ -521,12 +583,94 @@ public void testPreventJoinClusterWithRemoteStoreNodeWithPartialAttributesJoinin
 );
 assertTrue(
 e.getMessage().equals("joining node [" + joiningNode + "] doesn't have the node attribute [" + nodeAttribute.getKey() + "]")
+ || e.getMessage()
+ .equals(
+ "a remote store node ["
+ + joiningNode
+ + "] is trying to join a remote store cluster with incompatible node attributes in comparison with existing node ["
+ + currentState.getNodes().getNodes().values().stream().findFirst().get()
+ + "]"
+ )
 );
 remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue());
 }
 }
+
+ public void testJoinClusterWithRemoteStateNodeJoiningRemoteStateCluster() {
+ Map<String, String> existingNodeAttributes = remoteStateNodeAttributes(CLUSTER_STATE_REPO);
+ final DiscoveryNode existingNode = new DiscoveryNode(
+ UUIDs.base64UUID(),
+ buildNewFakeTransportAddress(),
+ existingNodeAttributes,
+ DiscoveryNodeRole.BUILT_IN_ROLES,
+ Version.CURRENT
+ );
+ ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+ .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build())
+ .build();
+ DiscoveryNode joiningNode = newDiscoveryNode(remoteStateNodeAttributes(CLUSTER_STATE_REPO));
+ JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata());
+ }
+
+ public void testPreventJoinClusterWithRemoteStateNodeJoiningRemoteStoreCluster() {
+ Map<String, String> existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO);
+ final DiscoveryNode existingNode = new DiscoveryNode(
+ UUIDs.base64UUID(),
+ buildNewFakeTransportAddress(),
+ existingNodeAttributes,
+ DiscoveryNodeRole.BUILT_IN_ROLES,
+ Version.CURRENT
+ );
+ ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+ .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build())
+ .build();
+ DiscoveryNode joiningNode = newDiscoveryNode(remoteStateNodeAttributes(CLUSTER_STATE_REPO));
+ Exception e = assertThrows(
+ IllegalStateException.class,
+ () ->
JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage() + .equals( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in comparison with existing node [" + + currentState.getNodes().getNodes().values().stream().findFirst().get() + + "]" + ) + ); + } + + public void testPreventJoinClusterWithRemoteStoreNodeJoiningRemoteStateCluster() { + Map<String, String> existingNodeAttributes = remoteStateNodeAttributes(CLUSTER_STATE_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage() + .equals( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in comparison with existing node [" + + currentState.getNodes().getNodes().values().stream().findFirst().get() + + "]" + ) + ); + } + public void testUpdatesClusterStateWithSingleNodeCluster() throws Exception { Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); final AllocationService allocationService = mock(AllocationService.class); @@ -808,6 +952,23 @@ private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, St REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, translogRepoName ); + + return new HashMap<>() { + { + put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName); + put(segmentRepositoryTypeAttributeKey, "s3"); + put(segmentRepositorySettingsAttributeKeyPrefix + "bucket", "segment_bucket"); + put(segmentRepositorySettingsAttributeKeyPrefix + "base_path", "/segment/path"); + put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName); + putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "base_path", "/translog/path"); + putAll(remoteStateNodeAttributes(clusterStateRepo)); + } + }; + } + + private Map<String, String> remoteStateNodeAttributes(String clusterStateRepo) { String clusterStateRepositoryTypeAttributeKey = String.format( Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, @@ -821,14 +982,6 @@ private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, St return new HashMap<>() { { - put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName); - put(segmentRepositoryTypeAttributeKey, "s3"); - put(segmentRepositorySettingsAttributeKeyPrefix + "bucket", "segment_bucket"); - put(segmentRepositorySettingsAttributeKeyPrefix + "base_path", "/segment/path"); - put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName); - putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); - putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); - 
putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "base_path", "/translog/path");
 put(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, clusterStateRepo);
 putIfAbsent(clusterStateRepositoryTypeAttributeKey, "s3");
 putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket");
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index cea151748bfb6..cc605878119a2 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -35,6 +35,7 @@
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.ResourceAlreadyExistsException;
 import org.opensearch.Version;
+import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.opensearch.action.admin.indices.alias.Alias;
 import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
 import org.opensearch.action.admin.indices.shrink.ResizeType;
@@ -133,6 +134,7 @@
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards;
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings;
 import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases;
+import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
@@ -146,6 +148,7 @@
 import static org.opensearch.node.Node.NODE_ATTRIBUTES;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasKey;
@@ -1898,7 +1901,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() {
 request,
 Settings.EMPTY,
 null,
- Settings.builder().put("node.attr.remote_store.setting", "test").build(),
+ Settings.builder().put("node.attr.remote_store.segment.repository", "test").build(),
 IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
 randomShardLimitService(),
 Collections.emptySet(),
@@ -1936,6 +1939,35 @@ public void testRequestDurabilityWhenRestrictSettingTrue() {
 assertEquals(Translog.Durability.REQUEST, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings));
 }
+ public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() {
+ // This checks that aggregateIndexSettings throws an exception when the index setting
+ // index.store.type is set to remote_snapshot
+ request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test");
+ final Settings.Builder requestSettings = Settings.builder();
+ requestSettings.put(INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT);
+ request.settings(requestSettings.build());
+ final IllegalArgumentException error = expectThrows(
+ IllegalArgumentException.class,
+ () ->
aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + assertThat( + error.getMessage(), + containsString( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ) + ); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java new file mode 100644 index 0000000000000..ad39e2b103087 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.metadata.View.Target; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.AbstractSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class ViewTests extends AbstractSerializingTestCase<View> { + + private static Set<Target> randomTargets() { + int numTargets = randomIntBetween(1, 25); + return new TreeSet<>(randomList(1, numTargets, () -> new View.Target(randomAlphaOfLength(8)))); + } + + private static View randomInstance() { + final Set<Target> targets = randomTargets(); + final String viewName = randomAlphaOfLength(10); + final String description = randomAlphaOfLength(100); + return new View(viewName, description, Math.abs(randomLong()), Math.abs(randomLong()), targets); + } + + @Override + protected View doParseInstance(XContentParser parser) throws IOException { + return View.fromXContent(parser); + } + + @Override + protected Writeable.Reader<View> instanceReader() { + return View::new; + } + + @Override + protected View createTestInstance() { + return randomInstance(); + } + + public void testNullName() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View(null, null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Name must be provided")); + } + + public void testNullTargets() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View("name", null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Targets are required on a view")); + } + + public void testNullTargetIndexPattern() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View.Target((String) null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("IndexPattern is required")); + } + + public void testDefaultValues() { + final View view = new View("myName", null, null, null, Set.of()); + + MatcherAssert.assertThat(view.getName(), equalTo("myName")); + 
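// Defaults under test: an omitted description stays null, the created/modified timestamps
+ // fall back to -1L, and the target set is empty, as the assertions below verify.
+ 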
MatcherAssert.assertThat(view.getDescription(), equalTo(null)); + MatcherAssert.assertThat(view.getCreatedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getModifiedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getTargets(), empty()); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java index b78d1b56364eb..e19bde5d53d8a 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java @@ -8,29 +8,60 @@ package org.opensearch.cluster.metadata; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.AbstractXContentTestCase; +import org.opensearch.test.AbstractDiffableSerializationTestCase; import java.io.IOException; +import java.util.HashMap; import java.util.Map; -public class WeightedRoutingMetadataTests extends AbstractXContentTestCase<WeightedRoutingMetadata> { +public class WeightedRoutingMetadataTests extends AbstractDiffableSerializationTestCase<Metadata.Custom> { + + @Override + protected Writeable.Reader<Metadata.Custom> instanceReader() { + return WeightedRoutingMetadata::new; + } + @Override protected WeightedRoutingMetadata createTestInstance() { + String attributeName = "zone"; Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); - WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + if (randomBoolean()) { + weights = new HashMap<>(); + attributeName = ""; + } + WeightedRouting weightedRouting = new WeightedRouting(attributeName, weights); WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } + @Override protected WeightedRoutingMetadata doParseInstance(XContentParser parser) throws IOException { return WeightedRoutingMetadata.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + + WeightedRouting weightedRouting = new WeightedRouting("", new HashMap<>()); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + + @Override + protected Writeable.Reader<Diff<Metadata.Custom>> diffReader() { + return WeightedRoutingMetadata::readDiffFrom; + } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java index 780d041c25d04..7a0fd76b0fbd9 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java @@ -46,8 +46,6 @@ import java.util.Iterator; import java.util.List; -import org.mockito.Mockito; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -206,40 +204,4 @@ public void 
testInterleavedShardIteratorReplicaFirst() { } assertEquals(shardCount, this.totalNumberOfShards); } - - public void testSwapPrimaryWithReplica() { - // Initialize all the shards for test index 1 and 2 - initPrimaries(); - startInitializingShards(TEST_INDEX_1); - startInitializingShards(TEST_INDEX_1); - startInitializingShards(TEST_INDEX_2); - startInitializingShards(TEST_INDEX_2); - - // Create primary shard count imbalance between two nodes - final RoutingNodes routingNodes = this.clusterState.getRoutingNodes(); - final RoutingNode node0 = routingNodes.node("node0"); - final RoutingNode node1 = routingNodes.node("node1"); - final List<ShardRouting> shardRoutingList = node0.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED); - final RoutingChangesObserver routingChangesObserver = Mockito.mock(RoutingChangesObserver.class); - int swaps = 0; - - for (ShardRouting routing : shardRoutingList) { - if (routing.primary()) { - ShardRouting swap = node1.getByShardId(routing.shardId()); - routingNodes.swapPrimaryWithReplica(logger, routing, swap, routingChangesObserver); - swaps++; - } - } - Mockito.verify(routingChangesObserver, Mockito.times(swaps)).replicaPromoted(Mockito.any()); - - final List<ShardRouting> shards = node1.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED); - int shardCount = 0; - for (ShardRouting shard : shards) { - if (shard.primary()) { - shardCount++; - } - } - - assertTrue(shardCount >= swaps); - } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java index 8542ff53c6ff1..97283f561d6d4 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java @@ -279,6 +279,31 @@ public void testAllShardsMatchingPredicate() { ); } + public void testAllShardsMatchingPredicateWithSpecificIndices() { + MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(RoutingTable.builder().addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build()) + .build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(); + clusterState = allocation.reroute(clusterState, "reroute"); + + String[] indices = new String[] { "test1", "test2" }; + // Verifies against all primary shards on the node + assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(indices, ShardRouting::primary).size(), is(2)); + // Verifies against all replica shards on the node + assertThat( + clusterState.routingTable().allShardsSatisfyingPredicate(indices, shardRouting -> !shardRouting.primary()).size(), + is(2) + ); + } + public void testActivePrimaryShardsGrouped() { assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0)); assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0)); diff --git 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index a1db6cd83ab6c..6a03a1f79bcde 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -229,6 +229,11 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return Decision.ALWAYS; } } + + @Override + public Decision canAllocateAnyShardToNode(RoutingNode node, RoutingAllocation allocation) { + return throttle ? Decision.THROTTLE : Decision.YES; + } }); Collections.shuffle(deciders, random()); return new AllocationDeciders(deciders); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java index f2e79b319d0dd..b840b78eff448 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java @@ -47,7 +47,7 @@ public void testExcludeNodeIdMoveBlocked() { /** * Test move operations for index level allocation settings. - * Supported for local indices, not supported for remote indices. + * Supported for local indices and remote indices. */ public void testIndexLevelExclusions() throws InterruptedException { int localOnlyNodes = 7; @@ -102,8 +102,9 @@ public void testIndexLevelExclusions() throws InterruptedException { // No shard of updated local index should be on excluded local capable node assertTrue(routingTable.allShards(localIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedLocalOnlyNode))); - // Since remote index shards are untouched, at least one shard should - // continue to stay on the excluded remote capable node - assertTrue(routingTable.allShards(remoteIndex).stream().anyMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode))); + // No shard of updated remote index should be on excluded remote capable node + assertTrue( + routingTable.allShards(remoteIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode)) + ); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteStoreMigrationAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteStoreMigrationAllocationDeciderTests.java new file mode 100644 index 0000000000000..43363407d9249 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteStoreMigrationAllocationDeciderTests.java @@ -0,0 +1,681 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.RemoteStoreMigrationAllocationDecider; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.remotestore.RemoteStoreNodeService; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.hamcrest.core.Is.is; + +public class RemoteStoreMigrationAllocationDeciderTests extends OpenSearchAllocationTestCase { + + private final static String TEST_INDEX = "test_index"; + private final static String TEST_REPO = "test_repo"; + + private final Settings directionEnabledNodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + + private final Settings strictModeCompatibilitySettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.STRICT) + .build(); 
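+ // Illustrative sketch (using only names already in this file): the decider consumes two
+ // independent cluster settings, the migration direction and the compatibility mode, so a
+ // mixed-mode remote-store migration would combine the builders above and below as:
+ //   Settings combined = Settings.builder()
+ //       .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE)
+ //       .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED)
+ //       .build();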
+ private final Settings mixedModeCompatibilitySettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .build(); + + private final Settings remoteStoreDirectionSettings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .build(); + private final Settings docrepDirectionSettings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.DOCREP) + .build(); + + private Boolean isRemoteStoreBackedIndex = null, isMixedMode; + private int shardCount, replicaCount; + private IndexMetadata.Builder indexMetadataBuilder; + private Settings customSettings; + private DiscoveryNodes discoveryNodes; + private ClusterState clusterState; + private RemoteStoreMigrationAllocationDecider remoteStoreMigrationAllocationDecider; + private RoutingAllocation routingAllocation; + private Metadata metadata; + private RoutingTable routingTable = null; + + private void beforeAllocation() { + FeatureFlags.initializeFeatureFlags(directionEnabledNodeSettings); + if (isRemoteStoreBackedIndex == null) { + isRemoteStoreBackedIndex = randomBoolean(); + } + indexMetadataBuilder = getIndexMetadataBuilder(isRemoteStoreBackedIndex, shardCount, replicaCount); + + String compatibilityMode = isMixedMode + ? RemoteStoreNodeService.CompatibilityMode.MIXED.mode + : RemoteStoreNodeService.CompatibilityMode.STRICT.mode; + customSettings = getCustomSettings( + RemoteStoreNodeService.Direction.REMOTE_STORE.direction, + compatibilityMode, + indexMetadataBuilder + ); + + if (routingTable != null) { + metadata = Metadata.builder().put(indexMetadataBuilder).build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(metadata) + .routingTable(routingTable) + .nodes(discoveryNodes) + .build(); + } else { + clusterState = getInitialClusterState(customSettings, indexMetadataBuilder, discoveryNodes); + } + + remoteStoreMigrationAllocationDecider = new RemoteStoreMigrationAllocationDecider( + customSettings, + getClusterSettings(customSettings) + ); + + routingAllocation = new RoutingAllocation( + new AllocationDeciders(Collections.singleton(remoteStoreMigrationAllocationDecider)), + clusterState.getRoutingNodes(), + clusterState, + null, + null, + 0L + ); + routingAllocation.debugDecision(true); + } + + // tests for primary shard copy allocation with MIXED mode and REMOTE_STORE direction + + public void testDontAllocateNewPrimaryShardOnNonRemoteNodeForMixedModeAndRemoteStoreDirection() { + shardCount = 1; + replicaCount = 0; + isMixedMode = true; + + DiscoveryNode remoteNode = getRemoteNode(); + DiscoveryNode nonRemoteNode = getNonRemoteNode(); + + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode) + .localNodeId(nonRemoteNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build(); + + beforeAllocation(); + + ShardRouting primaryShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).primaryShard(); + RoutingNode nonRemoteRoutingNode = clusterState.getRoutingNodes().node(nonRemoteNode.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(primaryShardRouting, nonRemoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + String reason = "[remote_store migration_direction]: primary shard copy can not be allocated to a non-remote node"; + if (isRemoteStoreBackedIndex) { + reason = + "[remote_store migration_direction]: primary 
shard copy can not be allocated to a non-remote node because a remote store backed index's shard copy can only be allocated to a remote node"; + } + assertThat(decision.getExplanation().toLowerCase(Locale.ROOT), is(reason)); + } + + public void testAllocateNewPrimaryShardOnRemoteNodeForMixedModeAndRemoteStoreDirection() { + shardCount = 1; + replicaCount = 0; + isMixedMode = true; + + DiscoveryNode remoteNode = getRemoteNode(); + DiscoveryNode nonRemoteNode = getNonRemoteNode(); + + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode) + .localNodeId(nonRemoteNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build(); + + beforeAllocation(); + + ShardRouting primaryShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).primaryShard(); + RoutingNode remoteRoutingNode = clusterState.getRoutingNodes().node(remoteNode.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(primaryShardRouting, remoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat( + decision.getExplanation().toLowerCase(Locale.ROOT), + is("[remote_store migration_direction]: primary shard copy can be allocated to a remote node") + ); + } + + // tests for replica shard copy allocation with MIXED mode and REMOTE_STORE direction + + public void testDontAllocateNewReplicaShardOnRemoteNodeIfPrimaryShardOnNonRemoteNodeForMixedModeAndRemoteStoreDirection() { + shardCount = 1; + replicaCount = 1; + isMixedMode = true; + + ShardId shardId = new ShardId(TEST_INDEX, "_na_", 0); + + DiscoveryNode nonRemoteNode = getNonRemoteNode(); + DiscoveryNode remoteNode = getRemoteNode(); + + routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + // primary on non-remote node + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + nonRemoteNode.getId(), + true, + ShardRoutingState.STARTED + ) + ) + .addShard( + // new replica's allocation + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode) + .localNodeId(nonRemoteNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + ShardRouting replicaShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + RoutingNode remoteRoutingNode = clusterState.getRoutingNodes().node(remoteNode.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(replicaShardRouting, remoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat( + decision.getExplanation().toLowerCase(Locale.ROOT), + is( + "[remote_store migration_direction]: replica shard copy can not be allocated to a remote node since primary shard copy is not yet migrated to remote" + ) + ); + } + + public void testAllocateNewReplicaShardOnRemoteNodeIfPrimaryShardOnRemoteNodeForMixedModeAndRemoteStoreDirection() { + shardCount = 1; + replicaCount = 1; + isMixedMode = true; + + ShardId shardId = new ShardId(TEST_INDEX, "_na_", 0); + + DiscoveryNode remoteNode1 = getRemoteNode(); + DiscoveryNode remoteNode2 = getRemoteNode(); + DiscoveryNode nonRemoteNode = getNonRemoteNode(); + + 
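// Routing setup sketch: the primary is already STARTED on remoteNode1 and the replica is left
+ // UNASSIGNED, so the decider is evaluated for the replica's first allocation onto remoteNode2.
+ 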
routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + // primary on remote node + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + remoteNode1.getId(), + true, + ShardRoutingState.STARTED + ) + ) + .addShard( + // new replica's allocation + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .add(remoteNode2) + .localNodeId(remoteNode2.getId()) + .add(nonRemoteNode) + .localNodeId(nonRemoteNode.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + ShardRouting replicaShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + RoutingNode remoteRoutingNode = clusterState.getRoutingNodes().node(remoteNode2.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(replicaShardRouting, remoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat( + decision.getExplanation().toLowerCase(Locale.ROOT), + is( + "[remote_store migration_direction]: replica shard copy can be allocated to a remote node since primary shard copy has been migrated to remote" + ) + ); + } + + public void testAllocateNewReplicaShardOnNonRemoteNodeIfPrimaryShardOnNonRemoteNodeForMixedModeAndRemoteStoreDirection() { + shardCount = 1; + replicaCount = 1; + isMixedMode = true; + + ShardId shardId = new ShardId(TEST_INDEX, "_na_", 0); + + DiscoveryNode remoteNode = getRemoteNode(); + DiscoveryNode nonRemoteNode1 = getNonRemoteNode(); + DiscoveryNode nonRemoteNode2 = getNonRemoteNode(); + + routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + // primary shard on non-remote node + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + nonRemoteNode1.getId(), + true, + ShardRoutingState.STARTED + ) + ) + .addShard( + // new replica's allocation + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .add(nonRemoteNode1) + .localNodeId(nonRemoteNode1.getId()) + .add(nonRemoteNode2) + .localNodeId(nonRemoteNode2.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + + ShardRouting replicaShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + RoutingNode nonRemoteRoutingNode = clusterState.getRoutingNodes().node(nonRemoteNode2.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(replicaShardRouting, nonRemoteRoutingNode, routingAllocation); + Decision.Type type = Decision.Type.YES; + String reason = "[remote_store migration_direction]: replica shard copy can be allocated to a non-remote node"; + if (isRemoteStoreBackedIndex) { + type = Decision.Type.NO; + reason = + "[remote_store migration_direction]: replica shard copy can not be allocated to a non-remote node because a remote store backed index's 
shard copy can only be allocated to a remote node"; + } + assertThat(decision.type(), is(type)); + assertThat(decision.getExplanation().toLowerCase(Locale.ROOT), is(reason)); + } + + public void testAllocateNewReplicaShardOnNonRemoteNodeIfPrimaryShardOnRemoteNodeForRemoteStoreDirection() { + shardCount = 1; + replicaCount = 1; + isMixedMode = true; + + ShardId shardId = new ShardId(TEST_INDEX, "_na_", 0); + + DiscoveryNode nonRemoteNode = getNonRemoteNode(); + DiscoveryNode remoteNode = getRemoteNode(); + + routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + // primary shard on non-remote node + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + remoteNode.getId(), + true, + ShardRoutingState.STARTED + ) + ) + .addShard( + // new replica's allocation + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode) + .localNodeId(nonRemoteNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + ShardRouting replicaShardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + RoutingNode nonRemoteRoutingNode = clusterState.getRoutingNodes().node(nonRemoteNode.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(replicaShardRouting, nonRemoteRoutingNode, routingAllocation); + Decision.Type type = Decision.Type.YES; + String reason = "[remote_store migration_direction]: replica shard copy can be allocated to a non-remote node"; + if (isRemoteStoreBackedIndex) { + type = Decision.Type.NO; + reason = + "[remote_store migration_direction]: replica shard copy can not be allocated to a non-remote node because a remote store backed index's shard copy can only be allocated to a remote node"; + } + assertThat(decision.type(), is(type)); + assertThat(decision.getExplanation().toLowerCase(Locale.ROOT), is(reason)); + } + + // test for STRICT mode + + public void testAlwaysAllocateNewShardForStrictMode() { + shardCount = 1; + replicaCount = 1; + isMixedMode = false; + isRemoteStoreBackedIndex = false; + + ShardId shardId = new ShardId(TEST_INDEX, "_na_", 0); + + DiscoveryNode nonRemoteNode1 = getNonRemoteNode(); + DiscoveryNode nonRemoteNode2 = getNonRemoteNode(); + + boolean isReplicaAllocation = randomBoolean(); + + routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + (isReplicaAllocation ? nonRemoteNode1.getId() : null), + true, + (isReplicaAllocation ? 
ShardRoutingState.STARTED : ShardRoutingState.UNASSIGNED) + ) + ) + .addShard( + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode1) + .localNodeId(nonRemoteNode1.getId()) + .add(nonRemoteNode2) + .localNodeId(nonRemoteNode2.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + + ShardRouting shardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).primaryShard(); + if (isReplicaAllocation) { + shardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + } + RoutingNode nonRemoteRoutingNode = clusterState.getRoutingNodes().node(nonRemoteNode2.getId()); + + Decision decision = remoteStoreMigrationAllocationDecider.canAllocate(shardRouting, nonRemoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + String reason = String.format( + Locale.ROOT, + "[remote_store migration_direction]: %s shard copy can be allocated to a non-remote node for strict compatibility mode", + (isReplicaAllocation ? "replica" : "primary") + ); + assertThat(decision.getExplanation().toLowerCase(Locale.ROOT), is(reason)); + + isRemoteStoreBackedIndex = true; + + DiscoveryNode remoteNode1 = getRemoteNode(); + DiscoveryNode remoteNode2 = getRemoteNode(); + + routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard( + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + (isReplicaAllocation ? remoteNode1.getId() : null), + true, + (isReplicaAllocation ? ShardRoutingState.STARTED : ShardRoutingState.UNASSIGNED) + ) + ) + .addShard( + // new replica's allocation + TestShardRouting.newShardRouting( + shardId.getIndexName(), + shardId.getId(), + null, + false, + ShardRoutingState.UNASSIGNED + ) + ) + .build() + ) + ) + .build(); + + discoveryNodes = DiscoveryNodes.builder() + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .add(remoteNode2) + .localNodeId(remoteNode2.getId()) + .build(); + + beforeAllocation(); + + assertEquals(2, clusterState.getRoutingTable().allShards().size()); + + shardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).primaryShard(); + if (isReplicaAllocation) { + shardRouting = clusterState.getRoutingTable().shardRoutingTable(TEST_INDEX, 0).replicaShards().get(0); + } + RoutingNode remoteRoutingNode = clusterState.getRoutingNodes().node(remoteNode2.getId()); + + decision = remoteStoreMigrationAllocationDecider.canAllocate(shardRouting, remoteRoutingNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + reason = String.format( + Locale.ROOT, + "[remote_store migration_direction]: %s shard copy can be allocated to a remote node for strict compatibility mode", + (isReplicaAllocation ? 
"replica" : "primary") + ); + assertThat(decision.getExplanation().toLowerCase(Locale.ROOT), is(reason)); + } + + // prepare index metadata for test-index + private IndexMetadata.Builder getIndexMetadataBuilder(boolean isRemoteStoreBackedIndex, int shardCount, int replicaCount) { + Settings.Builder builder = settings(Version.CURRENT); + if (isRemoteStoreBackedIndex) { + builder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, TEST_REPO) + .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, TEST_REPO) + .put(SETTING_REMOTE_STORE_ENABLED, true); + } + return IndexMetadata.builder(TEST_INDEX).settings(builder).numberOfShards(shardCount).numberOfReplicas(replicaCount); + } + + // get node-level settings + private Settings getCustomSettings(String direction, String compatibilityMode, IndexMetadata.Builder indexMetadataBuilder) { + Settings.Builder builder = Settings.builder(); + // direction settings + if (direction.toLowerCase(Locale.ROOT).equals(RemoteStoreNodeService.Direction.REMOTE_STORE.direction)) { + builder.put(remoteStoreDirectionSettings); + } else if (direction.toLowerCase(Locale.ROOT).equals(RemoteStoreNodeService.Direction.DOCREP.direction)) { + builder.put(docrepDirectionSettings); + } + + // compatibility mode settings + if (compatibilityMode.toLowerCase(Locale.ROOT).equals(RemoteStoreNodeService.CompatibilityMode.STRICT.mode)) { + builder.put(strictModeCompatibilitySettings); + } else if (compatibilityMode.toLowerCase(Locale.ROOT).equals(RemoteStoreNodeService.CompatibilityMode.MIXED.mode)) { + builder.put(mixedModeCompatibilitySettings); + } + + // index metadata settings + builder.put(indexMetadataBuilder.build().getSettings()); + + builder.put(directionEnabledNodeSettings); + + return builder.build(); + } + + private String getRandomCompatibilityMode() { + return randomFrom(RemoteStoreNodeService.CompatibilityMode.STRICT.mode, RemoteStoreNodeService.CompatibilityMode.MIXED.mode); + } + + private ClusterSettings getClusterSettings(Settings settings) { + return new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + private ClusterState getInitialClusterState( + Settings settings, + IndexMetadata.Builder indexMetadataBuilder, + DiscoveryNodes discoveryNodes + ) { + Metadata metadata = Metadata.builder().persistentSettings(settings).put(indexMetadataBuilder).build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(indexMetadataBuilder.build()) + .addAsNew(metadata.index(TEST_INDEX)) + .build(); + + return ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(discoveryNodes).build(); + } + + // get a dummy non-remote node + private DiscoveryNode getNonRemoteNode() { + return new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + } + + // get a dummy remote node + public DiscoveryNode getRemoteNode() { + Map<String, String> attributes = new HashMap<>(); + attributes.put( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_VALUE" + ); + return new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + attributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java new file mode 100644 index 0000000000000..35d7877343909 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheModuleTests extends OpenSearchTestCase { + + public void testWithMultiplePlugins() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache1", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache2", factory2)); + + CacheModule cacheModule = new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY); + + Map<String, ICache.Factory> factoryMap = cacheModule.getCacheStoreTypeFactories(); + assertEquals(factoryMap.get("cache1"), factory1); + assertEquals(factoryMap.get("cache2"), factory2); + } + + public void testWithSameCacheStoreTypeAndName() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(factory1.getCacheName()).thenReturn("cache"); + when(factory2.getCacheName()).thenReturn("cache"); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache", factory2)); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY) + ); + assertEquals("Cache name: cache is already registered", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/serializer/BytesReferenceSerializerTests.java b/server/src/test/java/org/opensearch/common/cache/serializer/BytesReferenceSerializerTests.java new file mode 100644 index 0000000000000..b1d9e762d5df7 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/serializer/BytesReferenceSerializerTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.serializer; + +import org.opensearch.common.Randomness; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.core.common.util.ByteArray; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Random; + +public class BytesReferenceSerializerTests extends OpenSearchTestCase { + public void testEquality() throws Exception { + BytesReferenceSerializer ser = new BytesReferenceSerializer(); + // Test that values are equal before and after serialization, for each implementation of BytesReference. + byte[] bytesValue = new byte[1000]; + Random rand = Randomness.get(); + rand.nextBytes(bytesValue); + + BytesReference ba = new BytesArray(bytesValue); + byte[] serialized = ser.serialize(ba); + assertTrue(ser.equals(ba, serialized)); + BytesReference deserialized = ser.deserialize(serialized); + assertEquals(ba, deserialized); + + ba = new BytesArray(new byte[] {}); + serialized = ser.serialize(ba); + assertTrue(ser.equals(ba, serialized)); + deserialized = ser.deserialize(serialized); + assertEquals(ba, deserialized); + + BytesReference cbr = CompositeBytesReference.of(new BytesArray(bytesValue), new BytesArray(bytesValue)); + serialized = ser.serialize(cbr); + assertTrue(ser.equals(cbr, serialized)); + deserialized = ser.deserialize(serialized); + assertEquals(cbr, deserialized); + + // We need the PagedBytesReference to be larger than the page size (16 KB) in order to actually create it + byte[] pbrValue = new byte[PageCacheRecycler.PAGE_SIZE_IN_BYTES * 2]; + rand.nextBytes(pbrValue); + ByteArray arr = BigArrays.NON_RECYCLING_INSTANCE.newByteArray(pbrValue.length); + arr.set(0L, pbrValue, 0, pbrValue.length); + assert !arr.hasArray(); + BytesReference pbr = BytesReference.fromByteArray(arr, pbrValue.length); + serialized = ser.serialize(pbr); + assertTrue(ser.equals(pbr, serialized)); + deserialized = ser.deserialize(serialized); + assertEquals(pbr, deserialized); + + BytesReference rbr = new ReleasableBytesReference(new BytesArray(bytesValue), ReleasableBytesReference.NO_OP); + serialized = ser.serialize(rbr); + assertTrue(ser.equals(rbr, serialized)); + deserialized = ser.deserialize(serialized); + assertEquals(rbr, deserialized); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java new file mode 100644 index 0000000000000..b355161f6f310 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheServiceTests extends OpenSearchTestCase { + + public void testWithCreateCacheForIndicesRequestCacheType() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").build() + ); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrue() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").put(FeatureFlags.PLUGGABLE_CACHE, "true").build() + ); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrueAndStoreNameIsNull() { + 
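// Expected fallback path: the pluggable-cache feature flag is enabled but no store name is
+ // configured for the cache type, so CacheService should fall back to the default
+ // OpenSearchOnHeapCache factory mocked below.
+ 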
CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + CacheService cacheService = new CacheService(factoryMap, Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build()); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheWithNoStoreNamePresentForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService(factoryMap, Settings.builder().build()); + + CacheConfig<String, String> config = mock(CacheConfig.class); + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No store name: [opensearch_onheap] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + } + + public void testWithCreateCacheWithDefaultStoreNameForIRC() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + CacheModule cacheModule = new CacheModule(List.of(mockPlugin1), Settings.EMPTY); + CacheConfig<String, String> config = mock(CacheConfig.class); + when(config.getSettings()).thenReturn(Settings.EMPTY); + when(config.getWeigher()).thenReturn((k, v) -> 100); + when(config.getRemovalListener()).thenReturn(mock(RemovalListener.class)); + + CacheService cacheService = cacheModule.getCacheService(); + ICache<String, String> iCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertTrue(iCache instanceof OpenSearchOnHeapCache); + } + + public void testWithCreateCacheWithInvalidStoreNameAssociatedForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache").build() + ); + + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> onHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(config, CacheType.INDICES_REQUEST_CACHE, factoryMap)).thenReturn(onHeapCache); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No store name: [opensearch_onheap] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + } +} diff --git 
a/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java b/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java deleted file mode 100644 index eb75244c6f8b1..0000000000000 --- a/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java +++ /dev/null @@ -1,786 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.cache.tier; - -import org.opensearch.common.cache.LoadAwareCacheLoader; -import org.opensearch.common.cache.RemovalReason; -import org.opensearch.common.cache.store.OpenSearchOnHeapCache; -import org.opensearch.common.cache.store.StoreAwareCache; -import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; -import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; -import org.opensearch.common.cache.store.enums.CacheStoreType; -import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; -import org.opensearch.common.metrics.CounterMetric; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.ArrayList; -import java.util.EnumMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Phaser; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -public class TieredSpilloverCacheTests extends OpenSearchTestCase { - - public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - randomIntBetween(1, 4), - eventListener, - 0 - ); - int numOfItems1 = randomIntBetween(1, onHeapCacheSize / 2 - 1); - List<String> keys = new ArrayList<>(); - // Put values in cache. - for (int iter = 0; iter < numOfItems1; iter++) { - String key = UUID.randomUUID().toString(); - keys.add(key); - LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); - } - assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); - - // Try to hit cache again with some randomization. - int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); - int cacheHit = 0; - int cacheMiss = 0; - for (int iter = 0; iter < numOfItems2; iter++) { - if (randomBoolean()) { - // Hit cache with stored key - cacheHit++; - int index = randomIntBetween(0, keys.size() - 1); - tieredSpilloverCache.computeIfAbsent(keys.get(index), getLoadAwareCacheLoader()); - } else { - // Hit cache with randomized key which is expected to miss cache always. 
- tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), getLoadAwareCacheLoader()); - cacheMiss++; - } - } - assertEquals(cacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(numOfItems1 + cacheMiss, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); - } - - public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(60, 100); - int totalSize = onHeapCacheSize + diskCacheSize; - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - StoreAwareCacheBuilder<String, String> cacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>().setMaximumWeightInBytes( - onHeapCacheSize * 50 - ).setWeigher((k, v) -> 50); // Will support onHeapCacheSize entries. - - StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diskCacheSize) - .setDeliberateDelay(0); - - TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(cacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) - .build(); - - // Put values in cache more than it's size and cause evictions from onHeap. - int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); - List<String> onHeapKeys = new ArrayList<>(); - List<String> diskTierKeys = new ArrayList<>(); - for (int iter = 0; iter < numOfItems1; iter++) { - String key = UUID.randomUUID().toString(); - LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); - } - long actualDiskCacheSize = tieredSpilloverCache.getOnDiskCache().get().count(); - assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); - - assertEquals( - eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count(), - eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count() - ); - assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); - - tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); - tieredSpilloverCache.getOnDiskCache().get().keys().forEach(diskTierKeys::add); - - assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size()); - assertEquals(tieredSpilloverCache.getOnDiskCache().get().count(), diskTierKeys.size()); - - // Try to hit cache again with some randomization. - int numOfItems2 = randomIntBetween(50, 200); - int onHeapCacheHit = 0; - int diskCacheHit = 0; - int cacheMiss = 0; - for (int iter = 0; iter < numOfItems2; iter++) { - if (randomBoolean()) { // Hit cache with key stored in onHeap cache. - onHeapCacheHit++; - int index = randomIntBetween(0, onHeapKeys.size() - 1); - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(onHeapKeys.get(index), loadAwareCacheLoader); - assertFalse(loadAwareCacheLoader.isLoaded()); - } else { // Hit cache with key stored in disk cache. 
- diskCacheHit++; - int index = randomIntBetween(0, diskTierKeys.size() - 1); - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(diskTierKeys.get(index), loadAwareCacheLoader); - assertFalse(loadAwareCacheLoader.isLoaded()); - } - } - for (int iter = 0; iter < randomIntBetween(50, 200); iter++) { - // Hit cache with randomized key which is expected to miss cache always. - LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); - cacheMiss++; - } - // On heap cache misses would also include diskCacheHits as it means it missed onHeap cache. - assertEquals(numOfItems1 + cacheMiss + diskCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); - assertEquals(onHeapCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); - assertEquals(cacheMiss + numOfItems1, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); - assertEquals(diskCacheHit, eventListener.enumMap.get(CacheStoreType.DISK).hitCount.count()); - } - - public void testComputeIfAbsentWithEvictionsFromBothTier() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); - int totalSize = onHeapCacheSize + diskCacheSize; - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - - int numOfItems = randomIntBetween(totalSize + 1, totalSize * 3); - for (int iter = 0; iter < numOfItems; iter++) { - LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); - } - assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); - assertTrue(eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count() > 0); - } - - public void testGetAndCount() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); - int totalSize = onHeapCacheSize + diskCacheSize; - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - - int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); - List<String> onHeapKeys = new ArrayList<>(); - List<String> diskTierKeys = new ArrayList<>(); - for (int iter = 0; iter < numOfItems1; iter++) { - String key = UUID.randomUUID().toString(); - if (iter > (onHeapCacheSize - 1)) { - // All these are bound to go to disk based cache. 
- diskTierKeys.add(key); - } else { - onHeapKeys.add(key); - } - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); - } - - for (int iter = 0; iter < numOfItems1; iter++) { - if (randomBoolean()) { - if (randomBoolean()) { - int index = randomIntBetween(0, onHeapKeys.size() - 1); - assertNotNull(tieredSpilloverCache.get(onHeapKeys.get(index))); - } else { - int index = randomIntBetween(0, diskTierKeys.size() - 1); - assertNotNull(tieredSpilloverCache.get(diskTierKeys.get(index))); - } - } else { - assertNull(tieredSpilloverCache.get(UUID.randomUUID().toString())); - } - } - assertEquals(numOfItems1, tieredSpilloverCache.count()); - } - - public void testWithDiskTierNull() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<String, String>(); - - StoreAwareCacheBuilder<String, String> onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>() - .setMaximumWeightInBytes(onHeapCacheSize * 20) - .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries - TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(onHeapCacheBuilder) - .setListener(eventListener) - .build(); - - int numOfItems = randomIntBetween(onHeapCacheSize + 1, onHeapCacheSize * 3); - for (int iter = 0; iter < numOfItems; iter++) { - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), loadAwareCacheLoader); - } - assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count()); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); - } - - public void testPut() { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - String key = UUID.randomUUID().toString(); - String value = UUID.randomUUID().toString(); - tieredSpilloverCache.put(key, value); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).cachedCount.count()); - assertEquals(1, tieredSpilloverCache.count()); - } - - public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { - int onHeapCacheSize = randomIntBetween(200, 400); - int diskCacheSize = randomIntBetween(450, 800); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - - for (int i = 0; i < onHeapCacheSize; i++) { - tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), new LoadAwareCacheLoader<>() { - @Override - public boolean isLoaded() { - return false; - } - - @Override - public String load(String key) throws Exception { - return UUID.randomUUID().toString(); - } - }); - } - - assertEquals(onHeapCacheSize, 
tieredSpilloverCache.getOnHeapCache().count()); - assertEquals(0, tieredSpilloverCache.getOnDiskCache().get().count()); - - // Again try to put OnHeap cache capacity amount of new items. - List<String> newKeyList = new ArrayList<>(); - for (int i = 0; i < onHeapCacheSize; i++) { - newKeyList.add(UUID.randomUUID().toString()); - } - - for (int i = 0; i < newKeyList.size(); i++) { - tieredSpilloverCache.computeIfAbsent(newKeyList.get(i), new LoadAwareCacheLoader<>() { - @Override - public boolean isLoaded() { - return false; - } - - @Override - public String load(String key) { - return UUID.randomUUID().toString(); - } - }); - } - - // Verify that new items are part of onHeap cache. - List<String> actualOnHeapCacheKeys = new ArrayList<>(); - tieredSpilloverCache.getOnHeapCache().keys().forEach(actualOnHeapCacheKeys::add); - - assertEquals(newKeyList.size(), actualOnHeapCacheKeys.size()); - for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) { - assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i))); - } - - assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); - assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnDiskCache().get().count()); - } - - public void testInvalidate() { - int onHeapCacheSize = 1; - int diskCacheSize = 10; - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - String key = UUID.randomUUID().toString(); - String value = UUID.randomUUID().toString(); - // First try to invalidate without the key present in cache. - tieredSpilloverCache.invalidate(key); - assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); - - // Now try to invalidate with the key present in onHeap cache. - tieredSpilloverCache.put(key, value); - tieredSpilloverCache.invalidate(key); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); - assertEquals(0, tieredSpilloverCache.count()); - - tieredSpilloverCache.put(key, value); - // Put another key/value so that one of the item is evicted to disk cache. - String key2 = UUID.randomUUID().toString(); - tieredSpilloverCache.put(key2, UUID.randomUUID().toString()); - assertEquals(2, tieredSpilloverCache.count()); - // Again invalidate older key - tieredSpilloverCache.invalidate(key); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.DISK).invalidationMetric.count()); - assertEquals(1, tieredSpilloverCache.count()); - } - - public void testCacheKeys() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(60, 100); - int totalSize = onHeapCacheSize + diskCacheSize; - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - List<String> onHeapKeys = new ArrayList<>(); - List<String> diskTierKeys = new ArrayList<>(); - // During first round add onHeapCacheSize entries. Will go to onHeap cache initially. - for (int i = 0; i < onHeapCacheSize; i++) { - String key = UUID.randomUUID().toString(); - diskTierKeys.add(key); - tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader()); - } - // In another round, add another onHeapCacheSize entries. 
These will go to onHeap and above ones will be - // evicted to onDisk cache. - for (int i = 0; i < onHeapCacheSize; i++) { - String key = UUID.randomUUID().toString(); - onHeapKeys.add(key); - tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader()); - } - - List<String> actualOnHeapKeys = new ArrayList<>(); - List<String> actualOnDiskKeys = new ArrayList<>(); - Iterable<String> onHeapiterable = tieredSpilloverCache.getOnHeapCache().keys(); - Iterable<String> onDiskiterable = tieredSpilloverCache.getOnDiskCache().get().keys(); - onHeapiterable.iterator().forEachRemaining(actualOnHeapKeys::add); - onDiskiterable.iterator().forEachRemaining(actualOnDiskKeys::add); - for (String onHeapKey : onHeapKeys) { - assertTrue(actualOnHeapKeys.contains(onHeapKey)); - } - for (String onDiskKey : actualOnDiskKeys) { - assertTrue(actualOnDiskKeys.contains(onDiskKey)); - } - - // Testing keys() which returns all keys. - List<String> actualMergedKeys = new ArrayList<>(); - List<String> expectedMergedKeys = new ArrayList<>(); - expectedMergedKeys.addAll(onHeapKeys); - expectedMergedKeys.addAll(diskTierKeys); - - Iterable<String> mergedIterable = tieredSpilloverCache.keys(); - mergedIterable.iterator().forEachRemaining(actualMergedKeys::add); - - assertEquals(expectedMergedKeys.size(), actualMergedKeys.size()); - for (String key : expectedMergedKeys) { - assertTrue(actualMergedKeys.contains(key)); - } - } - - public void testRefresh() { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(60, 100); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - tieredSpilloverCache.refresh(); - } - - public void testInvalidateAll() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(60, 100); - int totalSize = onHeapCacheSize + diskCacheSize; - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - // Put values in cache more than it's size and cause evictions from onHeap. - int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); - List<String> onHeapKeys = new ArrayList<>(); - List<String> diskTierKeys = new ArrayList<>(); - for (int iter = 0; iter < numOfItems1; iter++) { - String key = UUID.randomUUID().toString(); - if (iter > (onHeapCacheSize - 1)) { - // All these are bound to go to disk based cache. 
- diskTierKeys.add(key); - } else { - onHeapKeys.add(key); - } - LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); - } - assertEquals(numOfItems1, tieredSpilloverCache.count()); - tieredSpilloverCache.invalidateAll(); - assertEquals(0, tieredSpilloverCache.count()); - } - - public void testComputeIfAbsentConcurrently() throws Exception { - int onHeapCacheSize = randomIntBetween(100, 300); - int diskCacheSize = randomIntBetween(200, 400); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - - TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( - onHeapCacheSize, - diskCacheSize, - eventListener, - 0 - ); - - int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1); - String key = UUID.randomUUID().toString(); - String value = UUID.randomUUID().toString(); - - Thread[] threads = new Thread[numberOfSameKeys]; - Phaser phaser = new Phaser(numberOfSameKeys + 1); - CountDownLatch countDownLatch = new CountDownLatch(numberOfSameKeys); // To wait for all threads to finish. - - List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); - - for (int i = 0; i < numberOfSameKeys; i++) { - threads[i] = new Thread(() -> { - try { - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader() { - boolean isLoaded = false; - - @Override - public boolean isLoaded() { - return isLoaded; - } - - @Override - public Object load(Object key) throws Exception { - isLoaded = true; - return value; - } - }; - loadAwareCacheLoaderList.add(loadAwareCacheLoader); - phaser.arriveAndAwaitAdvance(); - tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); - } catch (Exception e) { - throw new RuntimeException(e); - } - countDownLatch.countDown(); - }); - threads[i].start(); - } - phaser.arriveAndAwaitAdvance(); - countDownLatch.await(); // Wait for rest of tasks to be cancelled. - int numberOfTimesKeyLoaded = 0; - assertEquals(numberOfSameKeys, loadAwareCacheLoaderList.size()); - for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) { - LoadAwareCacheLoader<String, String> loader = loadAwareCacheLoaderList.get(i); - if (loader.isLoaded()) { - numberOfTimesKeyLoaded++; - } - } - assertEquals(1, numberOfTimesKeyLoaded); // It should be loaded only once. - } - - public void testConcurrencyForEvictionFlow() throws Exception { - int diskCacheSize = randomIntBetween(450, 800); - - MockCacheEventListener<String, String> eventListener = new MockCacheEventListener<>(); - - StoreAwareCacheBuilder<String, String> cacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>().setMaximumWeightInBytes( - 200 - ).setWeigher((k, v) -> 150); - - StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diskCacheSize) - .setDeliberateDelay(500); - - TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>() - .setOnHeapCacheBuilder(cacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) - .build(); - - String keyToBeEvicted = "key1"; - String secondKey = "key2"; - - // Put first key on tiered cache. Will go into onHeap cache. 
- tieredSpilloverCache.computeIfAbsent(keyToBeEvicted, new LoadAwareCacheLoader<>() { - @Override - public boolean isLoaded() { - return false; - } - - @Override - public String load(String key) { - return UUID.randomUUID().toString(); - } - }); - CountDownLatch countDownLatch = new CountDownLatch(1); - CountDownLatch countDownLatch1 = new CountDownLatch(1); - // Put second key on tiered cache. Will cause eviction of first key from onHeap cache and should go into - // disk cache. - LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); - Thread thread = new Thread(() -> { - try { - tieredSpilloverCache.computeIfAbsent(secondKey, loadAwareCacheLoader); - countDownLatch1.countDown(); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - thread.start(); - assertBusy(() -> { assertTrue(loadAwareCacheLoader.isLoaded()); }, 100, TimeUnit.MILLISECONDS); // We wait for new key to be loaded - // after which it eviction flow is - // guaranteed to occur. - StoreAwareCache<String, String> onDiskCache = tieredSpilloverCache.getOnDiskCache().get(); - - // Now on a different thread, try to get key(above one which got evicted) from tiered cache. We expect this - // should return not null value as it should be present on diskCache. - AtomicReference<String> actualValue = new AtomicReference<>(); - Thread thread1 = new Thread(() -> { - try { - actualValue.set(tieredSpilloverCache.get(keyToBeEvicted)); - } catch (Exception e) { - throw new RuntimeException(e); - } - countDownLatch.countDown(); - }); - thread1.start(); - countDownLatch.await(); - assertNotNull(actualValue.get()); - countDownLatch1.await(); - assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); - assertEquals(1, tieredSpilloverCache.getOnHeapCache().count()); - assertEquals(1, onDiskCache.count()); - assertNotNull(onDiskCache.get(keyToBeEvicted)); - } - - class MockCacheEventListener<K, V> implements StoreAwareCacheEventListener<K, V> { - - EnumMap<CacheStoreType, TestStatsHolder> enumMap = new EnumMap<>(CacheStoreType.class); - - MockCacheEventListener() { - for (CacheStoreType cacheStoreType : CacheStoreType.values()) { - enumMap.put(cacheStoreType, new TestStatsHolder()); - } - } - - @Override - public void onMiss(K key, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).missCount.inc(); - } - - @Override - public void onRemoval(StoreAwareCacheRemovalNotification<K, V> notification) { - if (notification.getRemovalReason().equals(RemovalReason.EVICTED)) { - enumMap.get(notification.getCacheStoreType()).evictionsMetric.inc(); - } else if (notification.getRemovalReason().equals(RemovalReason.INVALIDATED)) { - enumMap.get(notification.getCacheStoreType()).invalidationMetric.inc(); - } - } - - @Override - public void onHit(K key, V value, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).hitCount.inc(); - } - - @Override - public void onCached(K key, V value, CacheStoreType cacheStoreType) { - enumMap.get(cacheStoreType).cachedCount.inc(); - } - - class TestStatsHolder { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric hitCount = new CounterMetric(); - final CounterMetric missCount = new CounterMetric(); - final CounterMetric cachedCount = new CounterMetric(); - final CounterMetric invalidationMetric = new CounterMetric(); - } - } - - private LoadAwareCacheLoader<String, String> getLoadAwareCacheLoader() { - return new LoadAwareCacheLoader<String, String>() { - boolean isLoaded = false; - - @Override 
- public String load(String key) { - isLoaded = true; - return UUID.randomUUID().toString(); - } - - @Override - public boolean isLoaded() { - return isLoaded; - } - }; - } - - private TieredSpilloverCache<String, String> intializeTieredSpilloverCache( - int onHeapCacheSize, - int diksCacheSize, - StoreAwareCacheEventListener<String, String> eventListener, - long diskDeliberateDelay - ) { - StoreAwareCacheBuilder<String, String> diskCacheBuilder = new MockOnDiskCache.Builder<String, String>().setMaxSize(diksCacheSize) - .setDeliberateDelay(diskDeliberateDelay); - StoreAwareCacheBuilder<String, String> onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder<String, String>() - .setMaximumWeightInBytes(onHeapCacheSize * 20) - .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries - return new TieredSpilloverCache.Builder<String, String>().setOnHeapCacheBuilder(onHeapCacheBuilder) - .setOnDiskCacheBuilder(diskCacheBuilder) - .setListener(eventListener) - .build(); - } -} - -class MockOnDiskCache<K, V> implements StoreAwareCache<K, V> { - - Map<K, V> cache; - int maxSize; - - long delay; - StoreAwareCacheEventListener<K, V> eventListener; - - MockOnDiskCache(int maxSize, StoreAwareCacheEventListener<K, V> eventListener, long delay) { - this.maxSize = maxSize; - this.eventListener = eventListener; - this.delay = delay; - this.cache = new ConcurrentHashMap<K, V>(); - } - - @Override - public V get(K key) { - V value = cache.get(key); - if (value != null) { - eventListener.onHit(key, value, CacheStoreType.DISK); - } else { - eventListener.onMiss(key, CacheStoreType.DISK); - } - return value; - } - - @Override - public void put(K key, V value) { - if (this.cache.size() >= maxSize) { // For simplification - eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, value, RemovalReason.EVICTED, CacheStoreType.DISK)); - return; - } - try { - Thread.sleep(delay); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - this.cache.put(key, value); - eventListener.onCached(key, value, CacheStoreType.DISK); - } - - @Override - public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { - V value = cache.computeIfAbsent(key, key1 -> { - try { - return loader.load(key); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - if (!loader.isLoaded()) { - eventListener.onHit(key, value, CacheStoreType.DISK); - } else { - eventListener.onMiss(key, CacheStoreType.DISK); - eventListener.onCached(key, value, CacheStoreType.DISK); - } - return value; - } - - @Override - public void invalidate(K key) { - if (this.cache.containsKey(key)) { - eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, null, RemovalReason.INVALIDATED, CacheStoreType.DISK)); - } - this.cache.remove(key); - } - - @Override - public void invalidateAll() { - this.cache.clear(); - } - - @Override - public Iterable<K> keys() { - return this.cache.keySet(); - } - - @Override - public long count() { - return this.cache.size(); - } - - @Override - public void refresh() {} - - @Override - public CacheStoreType getTierType() { - return CacheStoreType.DISK; - } - - public static class Builder<K, V> extends StoreAwareCacheBuilder<K, V> { - - int maxSize; - long delay; - - @Override - public StoreAwareCache<K, V> build() { - return new MockOnDiskCache<K, V>(maxSize, this.getEventListener(), delay); - } - - public Builder<K, V> setMaxSize(int maxSize) { - this.maxSize = maxSize; - return this; - } - - public Builder<K, V> setDeliberateDelay(long millis) 
{ - this.delay = millis; - return this; - } - } -} diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index e8ddfde11f4cc..4fd8986d0b428 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -779,7 +779,9 @@ public void testSamePrinterOutput() { DateTime jodaDate = new DateTime(year, month, day, hour, minute, second, DateTimeZone.UTC); for (FormatNames format : FormatNames.values()) { - if (format == FormatNames.ISO8601 || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS) { + if (format == FormatNames.ISO8601 + || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS + || format == FormatNames.RFC3339_LENIENT) { // Nanos aren't supported by joda continue; } diff --git a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java index 827f9dd992294..ee71cfef7d925 100644 --- a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java @@ -32,6 +32,8 @@ package org.opensearch.common.lucene.store; +import org.apache.lucene.store.IndexInput; + import java.io.EOFException; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -153,4 +155,34 @@ public void testRandomAccessReads() throws IOException { // 10001001 00100101 10001001 00110000 11100111 00100100 10110001 00101110 assertEquals(-8564288273245753042L, indexInput.readLong(1)); } + + public void testReadBytesWithSlice() throws IOException { + int inputLength = randomIntBetween(100, 1000); + + byte[] input = randomUnicodeOfLength(inputLength).getBytes(StandardCharsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + + int sliceOffset = randomIntBetween(1, inputLength - 10); + int sliceLength = randomIntBetween(2, inputLength - sliceOffset); + IndexInput slice = indexInput.slice("slice", sliceOffset, sliceLength); + + // read a byte from the sliced index input and verify that the read value is correct + assertEquals(input[sliceOffset], slice.readByte()); + + // read a few more bytes into a byte array + int bytesToRead = randomIntBetween(1, sliceLength - 1); + slice.readBytes(new byte[bytesToRead], 0, bytesToRead); + + // now try to read beyond the boundary of the slice, but within the + // boundary of the original IndexInput.
We've already read a few bytes + // so this is expected to fail + assertThrows(EOFException.class, () -> slice.readBytes(new byte[sliceLength], 0, sliceLength)); + + // seek to EOF and then try to read + slice.seek(sliceLength); + assertThrows(EOFException.class, () -> slice.readBytes(new byte[1], 0, 1)); + + slice.close(); + indexInput.close(); + } } diff --git a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java index e366972feeaf2..2f0618ee299b4 100644 --- a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java +++ b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java @@ -36,8 +36,10 @@ import org.opensearch.rest.RestUtils; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -286,4 +288,33 @@ public void testEscapedSlashWithinUrl() { assertThat(params.get("type"), equalTo("type")); assertThat(params.get("id"), equalTo("id")); } + + public void testRetrieveAllEmpty() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + Iterator<String> allPaths = trie.retrieveAll(); + assertFalse(allPaths.hasNext()); + } + + public void testRetrieveAll() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", "test1"); + trie.insert("{testA}/{testB}", "test2"); + trie.insert("a/{testB}", "test3"); + trie.insert("{testA}/b", "test4"); + trie.insert("{testA}/b/c", "test5"); + + Iterator<String> iterator = trie.retrieveAll(); + assertTrue(iterator.hasNext()); + List<String> paths = new ArrayList<>(); + iterator.forEachRemaining(paths::add); + assertEquals(paths, List.of("test1", "test4", "test5", "test2", "test3")); + assertFalse(iterator.hasNext()); + } + + public void testRetrieveAllWithNullValue() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", null); + Iterator<String> iterator = trie.retrieveAll(); + assertFalse(iterator.hasNext()); + } } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index 13cecc7157d82..c6da96b521276 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -909,6 +909,18 @@ public void testDynamicKeySetting() { } } + public void testAffixKeySettingWithDynamicPrefix() { + Setting.AffixSetting<Boolean> setting = Setting.suffixKeySetting( + "enable", + (key) -> Setting.boolSetting(key, false, Property.NodeScope) + ); + Setting<Boolean> concreteSetting = setting.getConcreteSettingForNamespace("foo.bar"); + assertEquals("foo.bar.enable", concreteSetting.getKey()); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> setting.getConcreteSettingForNamespace("foo.")); + assertEquals("key [foo..enable] must match [*.enable] but didn't.", ex.getMessage()); + } + public void testAffixKeySetting() { Setting<Boolean> setting = Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); assertTrue(setting.hasComplexMatcher()); diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index 0becb6cde5e64..66c9801d16598 100644 ---
a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -286,24 +286,9 @@ public void testDynamicIndexSettingsRegistration() { } public void testConcurrentSegmentSearchClusterSettings() { - // Test that we throw an exception without the feature flag - Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(); - SettingsException ex = expectThrows(SettingsException.class, () -> new SettingsModule(settings)); - assertEquals( - "unknown setting [" - + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey() - + "] please check that any required plugins are installed, or check the breaking " - + "changes documentation for removed settings", - ex.getMessage() - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); boolean settingValue = randomBoolean(); - Settings settingsWithFeatureFlag = Settings.builder() - .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue) - .build(); - SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); + Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue).build(); + SettingsModule settingsModule = new SettingsModule(settings); assertEquals(settingValue, SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settingsModule.getSettings())); } @@ -311,24 +296,9 @@ public void testConcurrentSegmentSearchIndexSettings() { Settings.Builder target = Settings.builder().put(Settings.EMPTY); Settings.Builder update = Settings.builder(); - // Test that we throw an exception without the feature flag SettingsModule module = new SettingsModule(Settings.EMPTY); IndexScopedSettings indexScopedSettings = module.getIndexScopedSettings(); - expectThrows( - SettingsException.class, - () -> indexScopedSettings.updateDynamicSettings( - Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), - target, - update, - "node" - ) - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); - SettingsModule moduleWithFeatureFlag = new SettingsModule(Settings.EMPTY); - IndexScopedSettings indexScopedSettingsWithFeatureFlag = moduleWithFeatureFlag.getIndexScopedSettings(); - indexScopedSettingsWithFeatureFlag.updateDynamicSettings( + indexScopedSettings.updateDynamicSettings( Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), target, update, @@ -337,23 +307,11 @@ public void testConcurrentSegmentSearchIndexSettings() { } public void testMaxSliceCountClusterSettingsForConcurrentSearch() { - // Test that we throw an exception without the feature flag - Settings settings = Settings.builder() - .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), 2) - .build(); - SettingsException ex = expectThrows(SettingsException.class, () -> new SettingsModule(settings)); - assertTrue( - ex.getMessage() - .contains("unknown setting [" + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey() + "]") - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); int settingValue = randomIntBetween(0, 10); - Settings 
settingsWithFeatureFlag = Settings.builder() + Settings settings = Settings.builder() .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); - SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); + SettingsModule settingsModule = new SettingsModule(settings); assertEquals( settingValue, (int) SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.get(settingsModule.getSettings()) @@ -361,10 +319,10 @@ public void testMaxSliceCountClusterSettingsForConcurrentSearch() { // Test that negative value is not allowed settingValue = -1; - final Settings settingsWithFeatureFlag_2 = Settings.builder() + final Settings settings_2 = Settings.builder() .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settingsWithFeatureFlag_2)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings_2)); assertTrue(iae.getMessage().contains(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())); } } diff --git a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java index 681daf1755890..2ccee197686ec 100644 --- a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java @@ -211,6 +211,10 @@ public void testEpochMillisParser() { assertThat(formatter.format(instant), is("-0.12345")); assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant)); } + { + Instant instant = Instant.ofEpochMilli(Long.MIN_VALUE); + assertThat(formatter.format(instant), is("-" + Long.MAX_VALUE)); // We actually truncate to Long.MAX_VALUE to avoid overflow + } } public void testInvalidEpochMilliParser() { @@ -249,10 +253,19 @@ public void testEpochSecondParserWithFraction() { } public void testEpochMilliParsersWithDifferentFormatters() { - DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); - TemporalAccessor accessor = formatter.parse("123"); - assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); - assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||epoch_millis")); + } } public void testParsersWithMultipleInternalFormats() throws Exception { @@ -317,6 +330,11 @@ public void testEqualsAndHashcode() { assertThat(epochMillisFormatter.hashCode(), is(DateFormatters.forPattern("epoch_millis").hashCode())); assertThat(epochMillisFormatter, sameInstance(DateFormatters.forPattern("epoch_millis"))); assertThat(epochMillisFormatter, equalTo(DateFormatters.forPattern("epoch_millis"))); + + DateFormatter rfc3339Formatter = DateFormatters.forPattern("rfc3339_lenient"); + assertThat(rfc3339Formatter.hashCode(), is(DateFormatters.forPattern("rfc3339_lenient").hashCode())); + assertThat(rfc3339Formatter, sameInstance(DateFormatters.forPattern("rfc3339_lenient"))); + assertThat(rfc3339Formatter, equalTo(DateFormatters.forPattern("rfc3339_lenient"))); } public void testSupportBackwardsJava8Format() { @@ -461,6 +479,132 @@ public void testIso8601Parsing() { formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); } + public void testRFC3339Parsing() { + DateFormatter formatter = DateFormatters.forPattern("rfc3339_lenient"); + + // timezone not allowed with just date + formatter.format(formatter.parse("2018")); + formatter.format(formatter.parse("2018-05")); + formatter.format(formatter.parse("2018-05-15")); + + formatter.format(formatter.parse("2018-05-15T17:14Z")); + formatter.format(formatter.parse("2018-05-15T17:14z")); + formatter.format(formatter.parse("2018-05-15T17:14+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14-01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14:56Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56z")); + formatter.format(formatter.parse("2018-05-15T17:14:56+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56-01:00")); + + // milliseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+01:00")); + + // microseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-01:00")); + + // nanoseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); + + // 1994-11-05T08:15:30-05:00 corresponds to November 5, 1994, 8:15:30 am, US Eastern Standard Time. + // 1994-11-05T13:15:30Z corresponds to the same instant.
+ final Instant instantA = DateFormatters.from(formatter.parse("1994-11-05T08:15:30-05:00")).toInstant(); + final Instant instantB = DateFormatters.from(formatter.parse("1994-11-05T13:15:30Z")).toInstant(); + assertThat(instantA, is(instantB)); + + // Invalid dates should throw an exception + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), is("failed to parse date field [abc] with format [rfc3339_lenient]")); + // Invalid offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56-00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56-00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.+00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.+00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56_00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56_00:00] with format [rfc3339_lenient]")); + // No offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // No end of fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // Invalid fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.abcZ")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.abcZ] with format [rfc3339_lenient]")); + // Invalid date + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("201805-15T17:14:56.123456+0000")); + assertThat(e.getMessage(), is("failed to parse date field [201805-15T17:14:56.123456+0000] with format [rfc3339_lenient]")); + // More than 9 digits of nanosecond resolution + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.1234567891Z")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.1234567891Z] with format [rfc3339_lenient]")); + } + + public void testRFC3339ParserWithDifferentFormatters() { + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||rfc3339_lenient"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896000L)); + assertThat(formatter.pattern(), is("strict_date_optional_time||rfc3339_lenient")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + + { + DateFormatter formatter = 
DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123456789+0100"); + assertThat(DateFormatters.from(accessor).toInstant().getNano(), is(123456789)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + } + + public void testRFC3339ParserAgainstDifferentFormatters() { + DateFormatter rfc3339Formatter = DateFormatter.forPattern("rfc3339_lenient"); + { + DateFormatter isoFormatter = DateFormatter.forPattern("strict_date_optional_time"); + + assertDateTimeEquals("2018-05-15T17:14Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123456Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789-01:00", rfc3339Formatter, isoFormatter); + } + } + public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second @@ -683,4 +827,10 @@ public void testCamelCaseDeprecation() { } } } + + void assertDateTimeEquals(String toTest, DateFormatter candidateParser, DateFormatter baselineParser) { + Instant gotInstant = DateFormatters.from(candidateParser.parse(toTest)).toInstant(); + Instant expectedInstant = DateFormatters.from(baselineParser.parse(toTest)).toInstant(); + assertThat(gotInstant, is(expectedInstant)); + } } diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java index 2063cd26a9e8e..4823ce7a238e3 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java @@ -49,6 +49,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * Tests for OpenSearchExecutors and its components like OpenSearchAbortPolicy. @@ -279,6 +280,41 @@ public void testScaleDown() throws Exception { terminate(pool); } + /** + * The test case is adapted from https://bugs.openjdk.org/browse/JDK-8323659 reproducer. 
+ */ + public void testScaleUpWithSpawningTask() throws Exception { + ThreadPoolExecutor pool = OpenSearchExecutors.newScaling( + getClass().getName() + "/" + getTestName(), + 0, + 1, + between(1, 100), + randomTimeUnit(), + OpenSearchExecutors.daemonThreadFactory("test"), + threadContext + ); + assertThat("Min property", pool.getCorePoolSize(), equalTo(0)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(1)); + + final CountDownLatch latch = new CountDownLatch(10); + class TestTask implements Runnable { + @Override + public void run() { + latch.countDown(); + if (latch.getCount() > 0) { + pool.execute(TestTask.this); + } + } + } + pool.execute(new TestTask()); + latch.await(); + + assertThat("wrong pool size", pool.getPoolSize(), lessThanOrEqualTo(1)); + assertThat("wrong active size", pool.getActiveCount(), lessThanOrEqualTo(1)); + + terminate(pool); + } + public void testRejectionMessageAndShuttingDownFlag() throws InterruptedException { int pool = between(1, 10); int queue = between(0, 100); diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 7f669934579ee..962eb743dca6e 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -359,6 +359,57 @@ protected void doRun() throws Exception { env.close(); } + public void testIndexStoreListener() throws Exception { + final AtomicInteger shardCounter = new AtomicInteger(0); + final AtomicInteger indexCounter = new AtomicInteger(0); + final Index index = new Index("foo", "fooUUID"); + final ShardId shardId = new ShardId(index, 0); + final NodeEnvironment.IndexStoreListener listener = new NodeEnvironment.IndexStoreListener() { + @Override + public void beforeShardPathDeleted(ShardId inShardId, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(shardId, inShardId); + shardCounter.incrementAndGet(); + } + + @Override + public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, NodeEnvironment env) { + assertEquals(index, inIndex); + indexCounter.incrementAndGet(); + } + }; + final NodeEnvironment env = newNodeEnvironment(listener); + + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve("0")); + } + + for (Path path : env.indexPaths(index)) { + assertTrue(Files.exists(path.resolve("0"))); + } + assertEquals(0, shardCounter.get()); + + env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings); + + for (Path path : env.indexPaths(index)) { + assertFalse(Files.exists(path.resolve("0"))); + } + assertEquals(1, shardCounter.get()); + + for (Path path : env.indexPaths(index)) { + assertTrue(Files.exists(path)); + } + assertEquals(0, indexCounter.get()); + + env.deleteIndexDirectorySafe(index, 5000, idxSettings); + + for (Path path : env.indexPaths(index)) { + assertFalse(Files.exists(path)); + } + assertEquals(1, indexCounter.get()); + assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); + env.close(); + } + public void testStressShardLock() throws IOException, InterruptedException { class Int { int value = 0; @@ -629,6 +680,11 @@ public NodeEnvironment newNodeEnvironment() throws IOException { return newNodeEnvironment(Settings.EMPTY); } + public NodeEnvironment newNodeEnvironment(NodeEnvironment.IndexStoreListener listener) throws IOException { + Settings build = buildEnvSettings(Settings.EMPTY); + return new NodeEnvironment(build, 
TestEnvironment.newEnvironment(build), listener); + } + @Override public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = buildEnvSettings(settings); diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index dceda6433575c..e849f12143b4d 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -843,10 +843,12 @@ public TestAllocator addData( node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards( node, - allocationId, - primary, - replicationCheckpoint, - storeException + new TransportNodesGatewayStartedShardHelper.GatewayStartedShard( + allocationId, + primary, + replicationCheckpoint, + storeException + ) ) ); return this; diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java new file mode 100644 index 0000000000000..4796def2b8902 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java @@ -0,0 +1,340 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.gateway; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.AllocationDecision; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.test.IndexSettingsModule; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.routing.UnassignedInfo.Reason.CLUSTER_RECOVERED; + +public class PrimaryShardBatchAllocatorTests extends OpenSearchAllocationTestCase { + + private final ShardId shardId = new ShardId("test", "_na_", 0); + private static Set<ShardId> shardsInBatch; + private final DiscoveryNode node1 = newNode("node1"); + private final DiscoveryNode node2 = newNode("node2"); + 
private final DiscoveryNode node3 = newNode("node3"); + private TestBatchAllocator batchAllocator; + + public static void setUpShards(int numberOfShards) { + shardsInBatch = new HashSet<>(); + for (int shardNumber = 0; shardNumber < numberOfShards; shardNumber++) { + ShardId shardId = new ShardId("test", "_na_", shardNumber); + shardsInBatch.add(shardId); + } + } + + @Before + public void buildTestAllocator() { + this.batchAllocator = new TestBatchAllocator(); + } + + private void allocateAllUnassigned(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + batchAllocator.allocateUnassigned(iterator.next(), allocation, iterator); + } + } + + private void allocateAllUnassignedBatch(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List<ShardRouting> shardsToBatch = new ArrayList<>(); + while (iterator.hasNext()) { + shardsToBatch.add(iterator.next()); + } + batchAllocator.allocateUnassignedBatch(shardsToBatch, allocation); + } + + public void testMakeAllocationDecisionDataFetching() { + final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); + + List<ShardRouting> shards = new ArrayList<>(); + allocateAllUnassignedBatch(allocation); + ShardRouting shard = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); + shards.add(shard); + HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); + // verify we get decisions for all the shards + assertEquals(shards.size(), allDecisions.size()); + assertEquals(shards, new ArrayList<>(allDecisions.keySet())); + assertEquals(AllocationDecision.AWAITING_INFO, allDecisions.get(shard).getAllocationDecision()); + } + + public void testMakeAllocationDecisionForReplicaShard() { + final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); + + List<ShardRouting> replicaShards = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).replicaShards(); + List<ShardRouting> shards = new ArrayList<>(replicaShards); + HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); + // verify we get decisions for all the shards + assertEquals(shards.size(), allDecisions.size()); + assertEquals(shards, new ArrayList<>(allDecisions.keySet())); + assertFalse(allDecisions.get(replicaShards.get(0)).isDecisionTaken()); + } + + public void testMakeAllocationDecisionDataFetched() { + final RoutingAllocation allocation = routingAllocationWithOnePrimary(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); + + List<ShardRouting> shards = new ArrayList<>(); + ShardRouting shard = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); + shards.add(shard); + batchAllocator.addData(node1, "allocId1", true, new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName())); + HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); + // verify we get decisions for all the shards + assertEquals(shards.size(), allDecisions.size()); + assertEquals(shards, new 
ArrayList<>(allDecisions.keySet())); + assertEquals(AllocationDecision.YES, allDecisions.get(shard).getAllocationDecision()); + } + + public void testMakeAllocationDecisionDataFetchedMultipleShards() { + setUpShards(2); + final RoutingAllocation allocation = routingAllocationWithMultiplePrimaries( + noAllocationDeciders(), + CLUSTER_RECOVERED, + 2, + 0, + "allocId-0", + "allocId-1" + ); + List<ShardRouting> shards = new ArrayList<>(); + for (ShardId shardId : shardsInBatch) { + ShardRouting shard = allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); + allocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard().recoverySource(); + shards.add(shard); + batchAllocator.addShardData( + node1, + "allocId-" + shardId.id(), + shardId, + true, + new ReplicationCheckpoint(shardId, 20, 101, 1, Codec.getDefault().getName()), + null + ); + } + HashMap<ShardRouting, AllocateUnassignedDecision> allDecisions = batchAllocator.makeAllocationDecision(shards, allocation, logger); + // verify we get decisions for all the shards + assertEquals(shards.size(), allDecisions.size()); + assertEquals(new HashSet<>(shards), allDecisions.keySet()); + for (ShardRouting shard : shards) { + assertEquals(AllocationDecision.YES, allDecisions.get(shard).getAllocationDecision()); + } + } + + private RoutingAllocation routingAllocationWithOnePrimary( + AllocationDeciders deciders, + UnassignedInfo.Reason reason, + String... activeAllocationIds + ) { + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(shardId.id(), Sets.newHashSet(activeAllocationIds)) + ) + .build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + switch (reason) { + + case INDEX_CREATED: + routingTableBuilder.addAsNew(metadata.index(shardId.getIndex())); + break; + case CLUSTER_RECOVERED: + routingTableBuilder.addAsRecovery(metadata.index(shardId.getIndex())); + break; + case INDEX_REOPENED: + routingTableBuilder.addAsFromCloseToOpen(metadata.index(shardId.getIndex())); + break; + default: + throw new IllegalArgumentException("can't do " + reason + " for you. teach me"); + } + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTableBuilder.build()) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, null, System.nanoTime()); + } + + private RoutingAllocation routingAllocationWithMultiplePrimaries( + AllocationDeciders deciders, + UnassignedInfo.Reason reason, + int numberOfShards, + int replicas, + String... 
activeAllocationIds + ) { + Iterator<ShardId> shardIterator = shardsInBatch.iterator(); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(replicas) + .putInSyncAllocationIds(shardIterator.next().id(), Sets.newHashSet(activeAllocationIds[0])) + .putInSyncAllocationIds(shardIterator.next().id(), Sets.newHashSet(activeAllocationIds[1])) + ) + .build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for (ShardId shardIdFromBatch : shardsInBatch) { + switch (reason) { + case INDEX_CREATED: + routingTableBuilder.addAsNew(metadata.index(shardIdFromBatch.getIndex())); + break; + case CLUSTER_RECOVERED: + routingTableBuilder.addAsRecovery(metadata.index(shardIdFromBatch.getIndex())); + break; + case INDEX_REOPENED: + routingTableBuilder.addAsFromCloseToOpen(metadata.index(shardIdFromBatch.getIndex())); + break; + default: + throw new IllegalArgumentException("can't do " + reason + " for you. teach me"); + } + } + ClusterState state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTableBuilder.build()) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, null, System.nanoTime()); + } + + class TestBatchAllocator extends PrimaryShardBatchAllocator { + + private Map<DiscoveryNode, TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> data; + + public TestBatchAllocator clear() { + data = null; + return this; + } + + public TestBatchAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint + ) { + return addData(node, allocationId, primary, replicationCheckpoint, null); + } + + public TestBatchAllocator addData(DiscoveryNode node, String allocationId, boolean primary) { + Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", nodeSettings); + return addData( + node, + allocationId, + primary, + ReplicationCheckpoint.empty(shardId, new CodecService(null, indexSettings, null).codec("default").getName()), + null + ); + } + + public TestBatchAllocator addData(DiscoveryNode node, String allocationId, boolean primary, @Nullable Exception storeException) { + Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", nodeSettings); + return addData( + node, + allocationId, + primary, + ReplicationCheckpoint.empty(shardId, new CodecService(null, indexSettings, null).codec("default").getName()), + storeException + ); + } + + public TestBatchAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + @Nullable Exception storeException + ) { + if (data == null) { + data = new HashMap<>(); + } + Map<ShardId, TransportNodesGatewayStartedShardHelper.GatewayStartedShard> shardData = Map.of( + shardId, + new TransportNodesGatewayStartedShardHelper.GatewayStartedShard( + allocationId, + primary, + replicationCheckpoint, + storeException + ) + ); + data.put(node, new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch(node, 
shardData)); + return this; + } + + public TestBatchAllocator addShardData( + DiscoveryNode node, + String allocationId, + ShardId shardId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + @Nullable Exception storeException + ) { + if (data == null) { + data = new HashMap<>(); + } + Map<ShardId, TransportNodesGatewayStartedShardHelper.GatewayStartedShard> shardData = new HashMap<>(); + shardData.put( + shardId, + new TransportNodesGatewayStartedShardHelper.GatewayStartedShard( + allocationId, + primary, + replicationCheckpoint, + storeException + ) + ); + if (data.get(node) != null) shardData.putAll(data.get(node).getNodeGatewayStartedShardsBatch()); + data.put(node, new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch(node, shardData)); + return this; + } + + @Override + protected AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> fetchData( + List<ShardRouting> shardsEligibleForFetch, + List<ShardRouting> inEligibleShards, + RoutingAllocation allocation + ) { + return new AsyncShardFetch.FetchResult<>(data, Collections.<ShardId, Set<String>>emptyMap()); + } + } +} diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java index 5833d9c4f187f..ae56bc0f8b3d2 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java @@ -68,6 +68,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.snapshots.SnapshotShardSizeInfo; import org.junit.Before; @@ -665,7 +666,7 @@ static String randomSyncId() { class TestAllocator extends ReplicaShardAllocator { - private Map<DiscoveryNode, TransportNodesListShardStoreMetadata.StoreFilesMetadata> data = null; + private Map<DiscoveryNode, StoreFilesMetadata> data = null; private AtomicBoolean fetchDataCalled = new AtomicBoolean(false); public void clean() { @@ -703,7 +704,7 @@ TestAllocator addData( } data.put( node, - new TransportNodesListShardStoreMetadata.StoreFilesMetadata( + new StoreFilesMetadata( shardId, new Store.MetadataSnapshot(unmodifiableMap(filesAsMap), unmodifiableMap(commitData), randomInt()), peerRecoveryRetentionLeases @@ -721,7 +722,7 @@ protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadata.NodeS Map<DiscoveryNode, TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> tData = null; if (data != null) { tData = new HashMap<>(); - for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetadata.StoreFilesMetadata> entry : data.entrySet()) { + for (Map.Entry<DiscoveryNode, StoreFilesMetadata> entry : data.entrySet()) { tData.put( entry.getKey(), new TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata(entry.getKey(), entry.getValue()) diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java new file mode 100644 index 0000000000000..92669d5bc1d92 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions 
made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +public class BloomFilterTests extends OpenSearchTestCase { + + public void testBloomFilterSerializationDeserialization() throws IOException { + int elementCount = randomIntBetween(1, 100); + long maxDocs = elementCount * 10L; // Keeping this high so that it ensures some bits are not set. + BloomFilter filter = new BloomFilter(maxDocs, getFpp(), () -> idIterator(elementCount)); + byte[] buffer = new byte[(int) maxDocs * 5]; + ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); + + // Write in the format readable through factory + out.writeString(filter.setType().getSetName()); + filter.writeTo(out); + + FuzzySet reconstructedFilter = FuzzySetFactory.deserializeFuzzySet(new ByteArrayIndexInput("filter", buffer)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, reconstructedFilter.setType()); + + Iterator<BytesRef> idIterator = idIterator(elementCount); + while (idIterator.hasNext()) { + BytesRef element = idIterator.next(); + assertEquals(FuzzySet.Result.MAYBE, reconstructedFilter.contains(element)); + assertEquals(FuzzySet.Result.MAYBE, filter.contains(element)); + } + } + + public void testBloomFilterIsSaturated_returnsTrue() throws IOException { + BloomFilter bloomFilter = new BloomFilter(1L, getFpp(), () -> idIterator(1000)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(true, bloomFilter.isSaturated()); + } + + public void testBloomFilterIsSaturated_returnsFalse() throws IOException { + int elementCount = randomIntBetween(1, 100); + BloomFilter bloomFilter = new BloomFilter(20000, getFpp(), () -> idIterator(elementCount)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(false, bloomFilter.isSaturated()); + } + + public void testBloomFilterWithLargeCapacity() throws IOException { + long maxDocs = randomLongBetween(Integer.MAX_VALUE, 5L * Integer.MAX_VALUE); + BloomFilter bloomFilter = new BloomFilter(maxDocs, getFpp(), () -> List.of(new BytesRef("bar")).iterator()); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + } + + private double getFpp() { + return randomDoubleBetween(0.01, 0.50, true); + } + + private Iterator<BytesRef> idIterator(int count) { + return new Iterator<BytesRef>() { + int cnt = count; + + @Override + public boolean hasNext() { + return cnt-- > 0; + } + + @Override + public BytesRef next() { + return new BytesRef(Integer.toString(cnt)); + } + }; + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java new file mode 100644 index 0000000000000..868c2175d0689 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; + +import java.util.TreeMap; + +public class FuzzyFilterPostingsFormatTests extends BasePostingsFormatTestCase { + + private TreeMap<String, FuzzySetParameters> params = new TreeMap<>() { + @Override + public FuzzySetParameters get(Object k) { + return new FuzzySetParameters(() -> FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY); + } + }; + + private Codec fuzzyFilterCodec = TestUtil.alwaysPostingsFormat( + new FuzzyFilterPostingsFormat(TestUtil.getDefaultPostingsFormat(), new FuzzySetFactory(params)) + ); + + @Override + protected Codec getCodec() { + return fuzzyFilterCodec; + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 5b586524d0bfc..cc927a19fd01a 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -78,6 +78,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.apache.lucene.tests.index.ForceMergePolicy; import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; @@ -152,6 +153,7 @@ import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.MockLogAppender; import org.opensearch.test.VersionUtils; @@ -3278,12 +3280,15 @@ public void onFailedEngine(String reason, Exception e) { final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>( new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) ); + + // Allow only force merge so that a regular background merge does not close the shard before any other operation + // InternalEngine engine = createEngine( config( defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3377,7 +3382,7 @@ public void onFailedEngine(String reason, Exception e) { defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3446,7 +3451,8 @@ public void eval(MockDirectoryWrapper dir) throws IOException { wrapper.failOn(fail); MockLogAppender mockAppender = MockLogAppender.createForLoggers(Loggers.getLogger(Engine.class, shardId)); try { - Store store = createStore(wrapper); + // Create a store where the directory is closed during unreferenced file cleanup. 
+ Store store = createFailingDirectoryStore(wrapper); final Engine.EventListener eventListener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, Exception e) { @@ -3473,7 +3479,7 @@ public void onFailedEngine(String reason, Exception e) { defaultSettings, store, createTempDir(), - newMergePolicy(), + newForceMergePolicy(), null, null, null, @@ -3534,6 +3540,33 @@ public void testSettings() { assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); } + /** + * Creates a merge policy which only supports force merge. + * @return a merge policy which only supports force merge. + */ + private MergePolicy newForceMergePolicy() { + return new ForceMergePolicy(new TieredMergePolicy()); + } + + /** + * Creates a store whose directory appears already closed when it is accessed during unreferenced file cleanup. + * + * @param directory directory used for creating the store. + * @return a store whose directory appears already closed when accessed during unreferenced file cleanup. + */ + private Store createFailingDirectoryStore(final Directory directory) { + return new Store(shardId, INDEX_SETTINGS, directory, new DummyShardLock(shardId)) { + @Override + public Directory directory() { + if (callStackContainsAnyOf("cleanUpUnreferencedFiles")) { + throw new AlreadyClosedException("store is already closed"); + } + + return super.directory(); + } + }; + } + public void testCurrentTranslogUUIIDIsCommitted() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { diff --git a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java index bcdca2236d3f3..3fb43b7dbdc4e 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java @@ -138,13 +138,15 @@ public void testGetForFieldRuntimeField() { ); final SetOnce<Supplier<SearchLookup>> searchLookupSetOnce = new SetOnce<>(); MappedFieldType ft = mock(MappedFieldType.class); + final int shardId = randomInt(); when(ft.fielddataBuilder(Mockito.any(), Mockito.any())).thenAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") Supplier<SearchLookup> searchLookup = (Supplier<SearchLookup>) invocationOnMock.getArguments()[1]; searchLookupSetOnce.set(searchLookup); + assertEquals(searchLookup.get().shardId(), shardId); return (IndexFieldData.Builder) (cache, breakerService) -> null; }); - SearchLookup searchLookup = new SearchLookup(null, null); + SearchLookup searchLookup = new SearchLookup(null, null, shardId); ifdService.getForField(ft, "qualified", () -> searchLookup); assertSame(searchLookup, searchLookupSetOnce.get().get()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java index 1a66037d98d71..0a2435553b19e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java @@ -32,10 +32,14 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; 
+import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -75,22 +79,41 @@ public void testTermQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); String ip = "2001:db8::2:1"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + + Query query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "192.168.1.7"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "2001:db8::2:1"; String prefix = ip + "/64"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); + + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64); + assertEquals(query, ft.termQuery(prefix, null)); ip = "192.168.1.7"; prefix = ip + "/16"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16); + assertEquals(query, ft.termQuery(prefix, null)); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("::1", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testTermsQuery() { @@ -118,44 +141,123 @@ public void testTermsQuery() { public void testRangeQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); - + Query query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")), + new IndexOrDocValuesQuery( + query, + 
SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")); assertEquals( - InetAddressPoint.newRangeQuery("field", 
InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, null) ); @@ -178,30 +280,60 @@ public void testRangeQuery() { ) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")); assertEquals( // lower bound is ipv4, upper bound is ipv6 - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, null) ); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testFetchSourceValue() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index bb3f2be8ea748..adcfc9d7b17fc 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ 
b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -63,7 +63,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; @@ -541,6 +543,28 @@ public void testReloadSearchAnalyzers() throws IOException { ); } + public void testMapperDynamicAllowedIgnored() { + final List<Function<Settings.Builder, Settings.Builder>> scenarios = List.of( + (builder) -> builder.putNull(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey()), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), true), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + ); + + for (int i = 0; i < scenarios.size(); i++) { + final Settings.Builder defaultSettingsBuilder = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); + + final Settings settings = scenarios.get(i).apply(defaultSettingsBuilder).build(); + + createIndex("test" + i, settings).mapperService(); + } + + assertWarnings( + "[index.mapper.dynamic] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." + ); + } + private boolean assertSameContainedFilters(TokenFilterFactory[] originalTokenFilter, NamedAnalyzer updatedAnalyzer) { ReloadableCustomAnalyzer updatedReloadableAnalyzer = (ReloadableCustomAnalyzer) updatedAnalyzer.analyzer(); TokenFilterFactory[] newTokenFilters = updatedReloadableAnalyzer.getComponents().getTokenFilters(); diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 62337264bc0b1..29efd64e5c751 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -34,12 +34,16 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; @@ -58,6 +62,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static org.opensearch.index.IndexSettingsTests.newIndexMeta; import static org.opensearch.index.query.InnerHitBuilderTests.randomNestedInnerHits; @@ -431,4 +436,96 @@ public void testSetParentFilterInContext() throws Exception { assertNull(queryShardContext.getParentFilter()); verify(innerQueryBuilder).toQuery(queryShardContext); } + + public void testNestedDepthProhibited() throws Exception { + assertThrows(IllegalArgumentException.class, () -> doWithDepth(0, context -> fail("won't call"))); + } + + public void testNestedDepthAllowed() throws Exception { + 
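// The check below runs a one-level nested query and verifies the rewritten Lucene join query keeps a MUST MatchAllDocsQuery clause, first at the default depth setting and then at a random allowed depth. +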
ThrowingConsumer<QueryShardContext> check = (context) -> { + NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None); + OpenSearchToParentBlockJoinQuery blockJoinQuery = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context); + Optional<BooleanClause> childLeg = ((BooleanQuery) blockJoinQuery.getChildQuery()).clauses() + .stream() + .filter(c -> c.getOccur() == BooleanClause.Occur.MUST) + .findFirst(); + assertTrue(childLeg.isPresent()); + assertEquals(new MatchAllDocsQuery(), childLeg.get().getQuery()); + }; + check.accept(createShardContext()); + doWithDepth(randomIntBetween(1, 20), check); + } + + public void testNestedDepthOnceOnly() throws Exception { + doWithDepth(1, this::checkOnceNested); + } + + public void testNestedDepthDefault() throws Exception { + assertEquals(20, createShardContext().getIndexSettings().getMaxNestedQueryDepth()); + } + + private void checkOnceNested(QueryShardContext ctx) throws Exception { + { + NestedQueryBuilder depth2 = new NestedQueryBuilder( + "nested1", + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None), + ScoreMode.None + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> depth2.toQuery(ctx)); + assertEquals( + "The depth of Nested Query is [2] has exceeded the allowed maximum of [1]. This maximum can be set by changing the [index.query.max_nested_depth] index level setting.", + e.getMessage() + ); + } + { + QueryBuilder mustBjqMustBjq = new BoolQueryBuilder().must( + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None) + ).must(new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None)); + BooleanQuery bool = (BooleanQuery) mustBjqMustBjq.toQuery(ctx); + assertEquals( + "Can parse joins one by one without breaching depth limit", + 2, + bool.clauses().stream().filter(c -> c.getQuery() instanceof OpenSearchToParentBlockJoinQuery).count() + ); + } + } + + public void testUpdateMaxDepthSettings() throws Exception { + doWithDepth(2, (ctx) -> { + assertEquals(ctx.getIndexSettings().getMaxNestedQueryDepth(), 2); + NestedQueryBuilder depth2 = new NestedQueryBuilder( + "nested1", + new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None), + ScoreMode.None + ); + Query depth2Query = depth2.toQuery(ctx); + assertTrue(depth2Query instanceof OpenSearchToParentBlockJoinQuery); + }); + } + + void doWithDepth(int depth, ThrowingConsumer<QueryShardContext> test) throws Exception { + QueryShardContext context = createShardContext(); + int defLimit = context.getIndexSettings().getMaxNestedQueryDepth(); + assertTrue(defLimit > 0); + Settings updateSettings = Settings.builder() + .put(context.getIndexSettings().getSettings()) + .put("index.query.max_nested_depth", depth) + .build(); + context.getIndexSettings().updateIndexMetadata(IndexMetadata.builder("index").settings(updateSettings).build()); + try { + test.accept(context); + } finally { + context.getIndexSettings() + .updateIndexMetadata( + IndexMetadata.builder("index") + .settings( + Settings.builder() + .put(context.getIndexSettings().getSettings()) + .put("index.query.max_nested_depth", defLimit) + .build() + ) + .build() + ); + } + } } diff --git a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java index c819d35872c6e..1a2ad49a3f334 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java 
+++ b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java @@ -91,6 +91,8 @@ public class QueryShardContextTests extends OpenSearchTestCase { + private static final int SHARD_ID = 0; + public void testFailIfFieldMappingNotFound() { QueryShardContext context = createQueryShardContext(IndexMetadata.INDEX_UUID_NA_VALUE, null); context.setAllowUnmappedFields(false); @@ -307,6 +309,11 @@ public void testFielddataLookupOneFieldManyReferences() throws IOException { assertEquals(Arrays.asList(expectedFirstDoc.toString(), expectedSecondDoc.toString()), collect("field", queryShardContext)); } + public void testSearchLookupShardId() { + SearchLookup searchLookup = createQueryShardContext("uuid", null, null).lookup(); + assertEquals(SHARD_ID, searchLookup.shardId()); + } + public static QueryShardContext createQueryShardContext(String indexUuid, String clusterAlias) { return createQueryShardContext(indexUuid, clusterAlias, null); } @@ -343,7 +350,7 @@ private static QueryShardContext createQueryShardContext( } final long nowInMillis = randomNonNegativeLong(); return new QueryShardContext( - 0, + SHARD_ID, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index c87cdfcc8f1a1..ccdd1fe4ab609 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; @@ -149,6 +150,7 @@ public void testComputeTimeLagOnUpdate() throws InterruptedException { Thread.sleep(1); transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + transferTracker.updateLatestLocalFileNameLengthMap(List.of("test"), k -> 1L); // Sleep for 100ms and then the lag should be within 100ms +/- 20ms Thread.sleep(100); assertTrue(Math.abs(transferTracker.getTimeMsLag() - 100) <= 20); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java index cb77174e612fd..9d00cf9f2be46 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -19,6 +19,7 @@ import org.opensearch.threadpool.ThreadPool; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; @@ -100,6 +101,7 @@ public void testValidateSegmentUploadLag() throws InterruptedException { while (currentTimeMsUsingSystemNanos() - localRefreshTimeMs <= 20 * avg) { Thread.sleep((long) (4 * avg)); } + pressureTracker.updateLatestLocalFileNameLengthMap(List.of("test"), k -> 1L); Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); String regex = "^rejected execution on primary shard:\\[index]\\[0] due to remote segments lagging behind " + "local segments.time_lag:[0-9]{2,3} ms dynamic_time_lag_threshold:95\\.0 ms$"; diff --git 
a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 555284e55a0fa..85878cc2e1c9d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.tests.store.BaseDirectoryWrapper; @@ -102,6 +103,16 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { public void tearDown() throws Exception { Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + + for (ReferenceManager.RefreshListener refreshListener : indexShard.getEngine().config().getInternalRefreshListener()) { + if (refreshListener instanceof ReleasableRetryableRefreshListener) { + ((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes(); + } + } + if (remoteStoreRefreshListener != null) { + remoteStoreRefreshListener.drainRefreshes(); + } + closeShards(indexShard); super.tearDown(); } @@ -335,6 +346,7 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 0); + assertTrue("remote store in sync", tuple.v1().isRemoteSegmentStoreInSync()); } public void testRefreshSuccessOnSecondAttempt() throws Exception { @@ -404,6 +416,20 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { assertNoLagAndTotalUploadsFailed(segmentTracker, 2); } + public void testRefreshPersistentFailure() throws Exception { + int succeedOnAttempt = 10; + CountDownLatch refreshCountLatch = new CountDownLatch(1); + CountDownLatch successLatch = new CountDownLatch(10); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch + ); + // Give 10ms for a few iterations of the remote refresh upload + Thread.sleep(10); + assertFalse("remote store should not be in sync", tuple.v1().isRemoteSegmentStoreInSync()); + } + private void assertNoLagAndTotalUploadsFailed(RemoteSegmentTransferTracker segmentTracker, long totalUploadsFailed) throws Exception { assertBusy(() -> { assertEquals(0, segmentTracker.getBytesLag()); @@ -425,6 +451,33 @@ public void testTrackerData() throws Exception { assertBusy(() -> assertNoLag(tracker)); } + /** + * Tests that the segment upload fails when the replication checkpoint and the replication tracker primary term mismatch + */ + public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { + int totalAttempt = 1; + int checkpointPublishSucceedOnAttempt = 0; + // We spy on IndexShard.isPrimaryStarted() to validate that we attempted the remote refresh the expected number of times. + CountDownLatch refreshCountLatch = new CountDownLatch(totalAttempt); + + // The success latch should not count down, since the upload fails the primary term validation. 
+ CountDownLatch successLatch = new CountDownLatch(1); + CountDownLatch reachedCheckpointPublishLatch = new CountDownLatch(0); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + totalAttempt, + refreshCountLatch, + successLatch, + checkpointPublishSucceedOnAttempt, + reachedCheckpointPublishLatch, + false + ); + + assertBusy(() -> assertEquals(1, tuple.v2().getRemoteSegmentTransferTracker(indexShard.shardId()).getTotalUploadsFailed())); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + assertBusy(() -> assertEquals(0, reachedCheckpointPublishLatch.getCount())); + } + private void assertNoLag(RemoteSegmentTransferTracker tracker) { assertEquals(0, tracker.getRefreshSeqNoLag()); assertEquals(0, tracker.getBytesLag()); @@ -460,6 +513,24 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch + ) throws IOException { + return mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + succeedCheckpointPublishOnAttempt, + reachedCheckpointPublishLatch, + true + ); + } + + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( + int succeedOnAttempt, + CountDownLatch refreshCountLatch, + CountDownLatch successLatch, + int succeedCheckpointPublishOnAttempt, + CountDownLatch reachedCheckpointPublishLatch, + boolean mockPrimaryTerm ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -500,6 +571,9 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Mock indexShard.getOperationPrimaryTerm() + if (mockPrimaryTerm) { + when(shard.getOperationPrimaryTerm()).thenReturn(indexShard.getOperationPrimaryTerm()); + } when(shard.getLatestReplicationCheckpoint()).thenReturn(indexShard.getLatestReplicationCheckpoint()); // Mock indexShard.routingEntry().primary() @@ -520,6 +594,7 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn // Mock indexShard.getSegmentInfosSnapshot() doAnswer(invocation -> { if (counter.incrementAndGet() <= succeedOnAttempt) { + logger.error("Failing getSegmentInfosSnapshot on attempt {}", counter.get()); throw new RuntimeException("Inducing failure in upload"); } return indexShard.getSegmentInfosSnapshot(); @@ -535,6 +610,7 @@ private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIn doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { successLatch.countDown(); + logger.info("Value of success latch {}", successLatch.getCount()); } return indexShard.getEngine(); }).when(shard).getEngine(); @@ -594,6 +670,31 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + assertTrue(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } + + public void testRemoteSegmentStoreNotInSync() throws IOException { + setup(true, 3); + remoteStoreRefreshListener.afterRefresh(true); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + 
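// First verify the uploaded segments match the local ones, then delete a single remote file to drive the store out of sync. +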
verifyUploadedSegments(remoteSegmentStoreDirectory); + remoteStoreRefreshListener.isRemoteSegmentStoreInSync(); + boolean oneFileDeleted = false; + // Delete any one file from remote store + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (oneFileDeleted == false && RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file) == false) { + remoteSegmentStoreDirectory.deleteFile(file); + oneFileDeleted = true; + break; + } + } + } + assertFalse(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } } } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java index f0950fe5392de..e541e988f3920 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -110,7 +110,6 @@ public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Except IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); - final int numDocs = shards.indexDocs(randomInt(10)); primary.refresh("Test"); final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); @@ -124,7 +123,6 @@ public void getCheckpointMetadata( ) { // trigger a cancellation by closing the replica. targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - resolveCheckpointInfoResponseListener(listener, primary); } @Override @@ -141,7 +139,6 @@ public void getSegmentFiles( }; when(sourceFactory.get(any())).thenReturn(source); startReplicationAndAssertCancellation(replica, primary, targetService); - shards.removeReplica(replica); closeShards(replica); } diff --git a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java index c1a51bb780f61..846b975a9520e 100644 --- a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java @@ -270,6 +270,7 @@ public void testStatsDirWrapper() throws IOException { IOUtils.close(dir, target); } + @SuppressWarnings("removal") public boolean hardLinksSupported(Path path) throws IOException { try { Files.createFile(path.resolve("foo.bar")); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 2c6c4afed69fd..8b69c15dac9d3 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; @@ -33,6 +34,8 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; @@ -41,6 +44,8 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -58,6 +63,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.mockito.Mockito; @@ -160,6 +166,7 @@ public void setup() throws IOException { when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.SAME)).thenReturn(executorService); } @After @@ -495,6 +502,75 @@ public void testIsAcquiredException() throws IOException { assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.isLockAcquired(testPrimaryTerm, testGeneration)); } + private List<String> getDummyMetadataFiles(int count) { + List<String> sortedMetadataFiles = new ArrayList<>(); + for (int counter = 0; counter < count; counter++) { + sortedMetadataFiles.add(RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(counter, 23, 34, 1, 1, "node-1")); + } + return sortedMetadataFiles; + } + + public void testGetMetadataFilesForActiveSegments() throws IOException { + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // scenario 1: if activeSegments([[0, 1, 2], 3(l), 4(l), 5(l), 6(l), 7(l), 8(l), 9]) => [9] + List<String> sortedMdFiles = getDummyMetadataFiles(10); + Set<String> lockedMdFiles = new HashSet<>(); + for (int idx = 3; idx <= 8; idx++) { + lockedMdFiles.add(sortedMdFiles.get(idx)); + } + Set<String> expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(8)); + assertEquals( + "scenario 1 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 2: if activeSegments([[0, 1, 2], 3, 4, 5, 6(l), 7(l), 8(l), 9]) => [2, 6, 8] + lockedMdFiles.clear(); + lockedMdFiles.add(sortedMdFiles.get(6)); + lockedMdFiles.add(sortedMdFiles.get(7)); + lockedMdFiles.add(sortedMdFiles.get(8)); + expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(2), sortedMdFiles.get(6), sortedMdFiles.get(8)); + assertEquals( + "scenario 2 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 3: if activeSegments([[0, 1, 2], 3, 4, 5(l), 6, 7(l), 8(l), 9]) => [3, 5, 7, 8] + lockedMdFiles.clear(); + lockedMdFiles.add(sortedMdFiles.get(5)); + lockedMdFiles.add(sortedMdFiles.get(7)); + lockedMdFiles.add(sortedMdFiles.get(8)); + expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(2), sortedMdFiles.get(5), sortedMdFiles.get(7), sortedMdFiles.get(8)); + assertEquals( + "scenario 3 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 4: if activeSegments([[0(l), 1(l), 2(l), 3(l), 4(l), 5(l), 6(l), 7(l), 8(l), 
9(l)]) + lockedMdFiles.addAll(sortedMdFiles); + expectedMdFilesForActiveSegments = Set.of(); + assertEquals( + "scenario 4 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(0, sortedMdFiles, lockedMdFiles) + ); + + // scenario 5: if (activeSegments([[0, 1, 2, 3]]) => [] + sortedMdFiles = sortedMdFiles.subList(0, 4); + lockedMdFiles.clear(); + expectedMdFilesForActiveSegments = Set.of(); + assertEquals( + "scenario 5 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(4, sortedMdFiles, lockedMdFiles) + ); + } + public void testGetMetadataFileForCommit() throws IOException { long testPrimaryTerm = 2; long testGeneration = 3; @@ -507,7 +583,6 @@ public void testGetMetadataFileForCommit() throws IOException { String output = remoteSegmentStoreDirectory.getMetadataFileForCommit(testPrimaryTerm, testGeneration); assertEquals("metadata__" + testPrimaryTerm + "__" + testGeneration + "__pqr", output); - } public void testCopyFrom() throws IOException { @@ -612,6 +687,29 @@ public void onFailure(Exception e) { storeDirectory.close(); } + public void testCleanupAsync() throws Exception { + populateMetadata(); + RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory = mock(RemoteSegmentStoreDirectoryFactory.class); + RemoteSegmentStoreDirectory remoteSegmentDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); + when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any())).thenReturn(remoteSegmentDirectory); + String repositoryName = "test-repository"; + String indexUUID = "test-idx-uuid"; + ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); + + RemoteSegmentStoreDirectory.remoteDirectoryCleanup(remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId); + verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); + verify(remoteMetadataDirectory).delete(); + verify(remoteDataDirectory).delete(); + verify(mdLockManager).delete(); + } + public void testCopyFromException() throws IOException { String filename = "_100.si"; Directory storeDirectory = LuceneTestCase.newDirectory(); @@ -971,21 +1069,85 @@ public void testDeleteStaleCommitsWithinThreshold() throws Exception { verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); } + @TestLogging(value = "_root:debug", reason = "Validate logging output") public void testDeleteStaleCommitsActualDelete() throws Exception { + try (final MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getRootLogger())) { + appender.addExpectation( + new MockLogAppender.PatternSeenWithLoggerPrefixExpectation( + "Metadata files to delete message", + "org.opensearch.index.store.RemoteSegmentStoreDirectory", + Level.DEBUG, + "metadataFilesEligibleToDelete=\\[" + metadataFilename3 + "\\] metadataFilesToBeDeleted=\\[" + metadataFilename3 + "\\]" + ) + ); + + final Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); + final List<String> filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3) + .values() + .stream() + .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]) + .collect(Collectors.toList()); + + 
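// init() loads the remote segment metadata before the stale-commit deletion below runs. +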
remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + for (final String file : filesToBeDeleted) { + verify(remoteDataDirectory).deleteFile(file); + } + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + appender.assertAllExpectationsMatched(); + } + } + + public void testDeleteStaleCommitsActualDeleteWithLocks() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); remoteSegmentStoreDirectory.init(); + // Locking one of the metadata files to ensure that it is not getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2)); + // populateMetadata() adds stub to return 3 metadata files // We are passing lastNMetadataFilesToKeep=1 here so that the two older metadata files become eligible for deletion - remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } - ; assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2); + } + + public void testDeleteStaleCommitsNoDeletesDueToLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Locking all the old metadata files to ensure that none of the segment files are getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2, metadataFilename3)); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=1 here, but both older metadata files are locked and must survive + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsExceptionWhileFetchingLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Simulating a failure while fetching the set of locked metadata files. 
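+ // The cleanup pass should then complete without deleting any metadata files.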
+ when(mdLockManager.fetchLockedMetadataFiles(any())).thenThrow(new RuntimeException("Rate limit exceeded")); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=1 here so that older metadata files would otherwise be eligible for deletion + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } public void testDeleteStaleCommitsDeleteDedup() throws Exception { diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index d7d326b325cc6..ab30a4c1c435f 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -87,7 +87,7 @@ import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.test.DummyShardLock; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.IndexSettingsModule; @@ -980,12 +980,11 @@ public void testStreamStoreFilesMetadata() throws Exception { ) ); } - TransportNodesListShardStoreMetadata.StoreFilesMetadata outStoreFileMetadata = - new TransportNodesListShardStoreMetadata.StoreFilesMetadata( - new ShardId("test", "_na_", 0), - metadataSnapshot, - peerRecoveryRetentionLeases - ); + StoreFilesMetadata outStoreFileMetadata = new StoreFilesMetadata( + new ShardId("test", "_na_", 0), + metadataSnapshot, + peerRecoveryRetentionLeases + ); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); org.opensearch.Version targetNodeVersion = randomVersion(random()); @@ -994,8 +993,7 @@ public void testStreamStoreFilesMetadata() throws Exception { ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); in.setVersion(targetNodeVersion); - TransportNodesListShardStoreMetadata.StoreFilesMetadata inStoreFileMetadata = - new TransportNodesListShardStoreMetadata.StoreFilesMetadata(in); + StoreFilesMetadata inStoreFileMetadata = new StoreFilesMetadata(in); Iterator<StoreFileMetadata> outFiles = outStoreFileMetadata.iterator(); for (StoreFileMetadata inFile : inStoreFileMetadata) { assertThat(inFile.name(), equalTo(outFiles.next().name())); diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java index b4eac2c4548d5..299100b65a43e 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java @@ -17,6 +17,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Set; import junit.framework.TestCase; @@ -96,4 +97,26 @@ public void testIsAcquiredExceptionCase() { // metadata file is not passed durin FileLockInfo testLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); assertThrows(IllegalArgumentException.class, () -> 
remoteStoreMetadataLockManager.isAcquired(testLockInfo)); } + + public void testFetchLocksEmpty() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn(Set.of()); + assertEquals(0, remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata").size()); + } + + public void testFetchLocksNonEmpty() throws IOException { + String metadata1 = "metadata_1_2_3"; + String metadata2 = "metadata_4_5_6"; + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn( + Set.of( + FileLockInfo.LockFileUtils.generateLockName(metadata1, "snapshot1"), + FileLockInfo.LockFileUtils.generateLockName(metadata2, "snapshot2") + ) + ); + assertEquals(Set.of(metadata1, metadata2), remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } + + public void testFetchLocksException() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenThrow(new IOException("Something went wrong")); + assertThrows(IOException.class, () -> remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index 2204124f1de4f..a135802c5f49c 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -78,11 +78,31 @@ public void test4MBBlock() throws Exception { runAllTestsFor(22); } - public void testChunkedRepository() throws IOException { - final long blockSize = new ByteSizeValue(1, ByteSizeUnit.KB).getBytes(); - final long repositoryChunkSize = new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(); - final long fileSize = new ByteSizeValue(3, ByteSizeUnit.KB).getBytes(); + public void testChunkedRepositoryWithBlockSizeGreaterThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(8, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeLessThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(1, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(3, ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeEqualToChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, ByteSizeUnit.KB).getBytes() // file size + ); + } + private void verifyChunkedRepository(long blockSize, long repositoryChunkSize, long fileSize) throws IOException { when(transferManager.fetchBlob(any())).thenReturn(new ByteArrayIndexInput("test", new byte[(int) blockSize])); try ( FSDirectory directory = new MMapDirectory(path, lockFactory); @@ -105,8 +125,9 @@ public void testChunkedRepository() throws IOException { // Seek to the position past the first repository chunk indexInput.seek(repositoryChunkSize); } - // Verify the second chunk is requested (i.e. 
".part1") - verify(transferManager).fetchBlob(argThat(request -> request.getBlobName().equals("File_Name.part1"))); + + // Verify all the chunks related to block are added to the fetchBlob request + verify(transferManager).fetchBlob(argThat(request -> request.getBlobLength() == blockSize)); } private void runAllTestsFor(int blockSizeShift) throws Exception { @@ -115,6 +136,7 @@ private void runAllTestsFor(int blockSizeShift) throws Exception { TestGroup.testGetBlock(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockOffset(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockStart(blockedSnapshotFile, blockSize); + TestGroup.testGetBlobParts(blockedSnapshotFile); TestGroup.testCurrentBlockStart(blockedSnapshotFile, blockSize); TestGroup.testCurrentBlockPosition(blockedSnapshotFile, blockSize); TestGroup.testClone(blockedSnapshotFile, blockSize); @@ -252,6 +274,35 @@ public static void testGetBlockStart(OnDemandBlockSnapshotIndexInput blockedSnap assertEquals(blockSize * 2, blockedSnapshotFile.getBlockStart(2)); } + public static void testGetBlobParts(OnDemandBlockSnapshotIndexInput blockedSnapshotFile) { + // block id 0 + int blockId = 0; + long blockStart = blockedSnapshotFile.getBlockStart(blockId); + long blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 1 + blockId = 1; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 2 + blockId = 2; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + } + public static void testCurrentBlockStart(OnDemandBlockSnapshotIndexInput blockedSnapshotFile, int blockSize) throws IOException { // block 0 blockedSnapshotFile.seek(blockSize - 1); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java index 04434fa52e555..e2a6a4011a6b7 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java @@ -18,7 +18,6 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; import org.junit.After; @@ -59,7 +58,7 @@ public class FileCacheCleanerTests extends OpenSearchTestCase { @Before public void setUpFileCache() throws IOException { env = newNodeEnvironment(SETTINGS); - cleaner = new FileCacheCleaner(env, fileCache); + cleaner = new FileCacheCleaner(() -> fileCache); files.put(SHARD_0, addFile(fileCache, env, SHARD_0)); files.put(SHARD_1, addFile(fileCache, env, SHARD_1)); MatcherAssert.assertThat(fileCache.size(), equalTo(2L)); @@ -103,12 +102,11 @@ public void testShardRemoved() { 
final Path cachePath = ShardPath.loadFileCachePath(env, SHARD_0).getDataPath(); assertTrue(Files.exists(cachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(1L)); assertNull(fileCache.get(files.get(SHARD_0))); assertFalse(Files.exists(files.get(SHARD_0))); assertTrue(Files.exists(files.get(SHARD_1))); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); assertFalse(Files.exists(cachePath)); } @@ -116,15 +114,9 @@ public void testIndexRemoved() { final Path indexCachePath = env.fileCacheNodePath().fileCachePath.resolve(SHARD_0.getIndex().getUUID()); assertTrue(Files.exists(indexCachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.beforeIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexRemoved( - SHARD_0.getIndex(), - INDEX_SETTINGS, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED - ); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); + cleaner.beforeShardPathDeleted(SHARD_1, INDEX_SETTINGS, env); + cleaner.beforeIndexPathDeleted(SHARD_0.getIndex(), INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(0L)); assertFalse(Files.exists(indexCachePath)); } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java index d42e614302658..7ae3944eb6944 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java @@ -163,17 +163,11 @@ public void testUsageExceedsCapacity() throws Exception { public void testDownloadFails() throws Exception { doThrow(new IOException("Expected test exception")).when(blobContainer).readBlob(eq("failure-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("failure-blob", 0, EIGHT_MB)); expectThrows( IOException.class, - () -> transferManager.fetchBlob( - BlobFetchRequest.builder() - .blobName("failure-blob") - .position(0) - .fileName("file") - .directory(directory) - .length(EIGHT_MB) - .build() - ) + () -> transferManager.fetchBlob(BlobFetchRequest.builder().fileName("file").directory(directory).blobParts(blobParts).build()) ); MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); MatcherAssert.assertThat(fileCache.usage().usage(), equalTo(0L)); @@ -187,16 +181,13 @@ public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception latch.await(); return new ByteArrayInputStream(createData()); }).when(blobContainer).readBlob(eq("blocking-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blocking-blob", 0, EIGHT_MB)); + final Thread blockingThread = new Thread(() -> { try { transferManager.fetchBlob( - BlobFetchRequest.builder() - .blobName("blocking-blob") - .position(0) - .fileName("blocking-file") - .directory(directory) - .length(EIGHT_MB) - .build() + BlobFetchRequest.builder().fileName("blocking-file").directory(directory).blobParts(blobParts).build() ); } catch (IOException e) { throw new RuntimeException(e); @@ -216,9 +207,9 @@ public void 
testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception } private IndexInput fetchBlobWithName(String blobname) throws IOException { - return transferManager.fetchBlob( - BlobFetchRequest.builder().blobName("blob").position(0).fileName(blobname).directory(directory).length(EIGHT_MB).build() - ); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blob", 0, EIGHT_MB)); + return transferManager.fetchBlob(BlobFetchRequest.builder().fileName(blobname).directory(directory).blobParts(blobParts).build()); } private static void assertIndexInputIsFunctional(IndexInput indexInput) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java index b96ada1f6bbff..9665b8e6fd646 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java @@ -20,6 +20,10 @@ import java.util.List; import java.util.Set; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + public class FileTransferTrackerTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); @@ -94,6 +98,32 @@ public void testOnFailure() throws IOException { } } + public void testOnSuccessStatsFailure() throws IOException { + RemoteTranslogTransferTracker localRemoteTranslogTransferTracker = spy(remoteTranslogTransferTracker); + doAnswer((count) -> { throw new NullPointerException("Error while updating stats"); }).when(localRemoteTranslogTransferTracker) + .addUploadBytesSucceeded(anyLong()); + + FileTransferTracker localFileTransferTracker = new FileTransferTracker(shardId, localRemoteTranslogTransferTracker); + + Path testFile = createTempFile(); + int fileSize = 128; + Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); + try ( + FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( + testFile, + randomNonNegativeLong(), + null + ); + ) { + Set<FileSnapshot.TransferFileSnapshot> toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + localFileTransferTracker.recordBytesForFiles(toUpload); + localRemoteTranslogTransferTracker.addUploadBytesStarted(fileSize); + localFileTransferTracker.onSuccess(transferFileSnapshot); + assertEquals(localFileTransferTracker.allUploaded().size(), 1); + } + } + public void testUploaded() throws IOException { Path testFile = createTempFile(); int fileSize = 128; diff --git a/server/src/test/java/org/opensearch/indices/IRCKeyWriteableSerializerTests.java b/server/src/test/java/org/opensearch/indices/IRCKeyWriteableSerializerTests.java new file mode 100644 index 0000000000000..af657dadd7a1a --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/IRCKeyWriteableSerializerTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.indices; + +import org.opensearch.common.Randomness; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Random; +import java.util.UUID; + +public class IRCKeyWriteableSerializerTests extends OpenSearchSingleNodeTestCase { + + public void testSerializer() throws Exception { + IndexService indexService = createIndex("test"); + IndexShard indexShard = indexService.getShardOrNull(0); + IRCKeyWriteableSerializer ser = new IRCKeyWriteableSerializer(); + + int NUM_KEYS = 1000; + int[] valueLengths = new int[] { 1000, 6000 }; // test both branches in equals() + Random rand = Randomness.get(); + for (int valueLength : valueLengths) { + for (int i = 0; i < NUM_KEYS; i++) { + IndicesRequestCache.Key key = getRandomIRCKey(valueLength, rand, indexShard.shardId()); + byte[] serialized = ser.serialize(key); + assertTrue(ser.equals(key, serialized)); + IndicesRequestCache.Key deserialized = ser.deserialize(serialized); + assertTrue(key.equals(deserialized)); + } + } + } + + private IndicesRequestCache.Key getRandomIRCKey(int valueLength, Random random, ShardId shard) { + byte[] value = new byte[valueLength]; + for (int i = 0; i < valueLength; i++) { + value[i] = (byte) (random.nextInt(126 - 32) + 32); + } + BytesReference keyValue = new BytesArray(value); + return new IndicesRequestCache.Key(shard, keyValue, UUID.randomUUID().toString()); // same UUID source as used in real key + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index 73728aec12e51..594b9aac971b7 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -46,9 +46,14 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.AbstractBytesReference; import org.opensearch.core.common.bytes.BytesReference; @@ -64,23 +69,34 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.node.Node; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Optional; import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class IndicesRequestCacheTests extends 
OpenSearchSingleNodeTestCase { + private ThreadPool getThreadPool() { + return new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "default tracer tests").build()); + } public void testBasicOperationsCache() throws Exception { IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache( Settings.EMPTY, - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), + threadPool ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -123,7 +139,7 @@ public void testBasicOperationsCache() throws Exception { indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(entity); } - cache.cleanCache(); + cache.cacheCleanupManager.cleanCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -132,12 +148,78 @@ public void testBasicOperationsCache() throws Exception { assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); IOUtils.close(reader, writer, dir, cache); + terminate(threadPool); + assertEquals(0, cache.numRegisteredCloseListeners()); + } + + public void testBasicOperationsCacheWithFeatureFlag() throws Exception { + IndexShard indexShard = createIndex("test").getShard(0); + CacheService cacheService = new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(); + ThreadPool threadPool = getThreadPool(); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(), + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + cacheService, + threadPool + ); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + + // initial cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = indexShard.requestCache(); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertFalse(loader.loadedFromCache); + assertEquals(1, cache.count()); + + // cache hit + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termBytes); + assertEquals("foo", value.streamInput().readString()); + requestCacheStats = indexShard.requestCache(); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + 
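// a repeat lookup with the same key must be served from the pluggable cache, so the loader reports a cache read +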
assertTrue(loader.loadedFromCache); + assertEquals(1, cache.count()); + assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); + assertEquals(1, cache.numRegisteredCloseListeners()); + + // Closing the cache doesn't modify an already returned CacheEntity + if (randomBoolean()) { + reader.close(); + } else { + indexShard.close("test", true, true); // closed shard but reader is still open + cache.clear(entity); + } + cache.cacheCleanupManager.cleanCache(); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertTrue(loader.loadedFromCache); + assertEquals(0, cache.count()); + assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); + + IOUtils.close(reader, writer, dir, cache); + terminate(threadPool); assertEquals(0, cache.numRegisteredCloseListeners()); } public void testCacheDifferentReaders() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { IndexService indexService = null; try { @@ -146,7 +228,7 @@ public void testCacheDifferentReaders() throws Exception { return Optional.empty(); } return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - })); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), threadPool); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -215,7 +297,7 @@ public void testCacheDifferentReaders() throws Exception { // Closing the cache doesn't change returned entities reader.close(); - cache.cleanCache(); + cache.cacheCleanupManager.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); assertTrue(loader.loadedFromCache); @@ -230,7 +312,7 @@ public void testCacheDifferentReaders() throws Exception { indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(secondEntity); } - cache.cleanCache(); + cache.cacheCleanupManager.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); assertTrue(loader.loadedFromCache); @@ -238,16 +320,436 @@ public void testCacheDifferentReaders() throws Exception { assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); assertEquals(0, cache.numRegisteredCloseListeners()); } + public void testCacheCleanupThresholdSettingValidator_Valid_Percentage() { + String s = IndicesRequestCache.validateStalenessSetting("50%"); + assertEquals("50%", s); + } + + public void testCacheCleanupThresholdSettingValidator_Valid_Double() { + String s = IndicesRequestCache.validateStalenessSetting("0.5"); + assertEquals("0.5", s); + } + + public void testCacheCleanupThresholdSettingValidator_Valid_DecimalPercentage() { + String s = IndicesRequestCache.validateStalenessSetting("0.5%"); + assertEquals("0.5%", s); + } + + public void testCacheCleanupThresholdSettingValidator_InValid_MB() { + assertThrows(IllegalArgumentException.class, () -> { IndicesRequestCache.validateStalenessSetting("50mb"); }); + } + + public void 
testCacheCleanupThresholdSettingValidator_Invalid_Percentage() { + assertThrows(IllegalArgumentException.class, () -> { IndicesRequestCache.validateStalenessSetting("500%"); }); + } + + public void testCacheCleanupBasedOnZeroThreshold() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0%").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), settings).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + // 1 out of 2 keys ie 50% are now stale. 
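+ // with a 0% threshold, any staleness at all should make the next cleanCache() pass evict the stale key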
+ reader.close(); + // cache count should not be affected + assertEquals(2, cache.count()); + // clean cache with 0% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should remove the stale-key + assertEquals(1, cache.count()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + + public void testCacheCleanupBasedOnStaleThreshold_StalenessEqualToThreshold() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.5").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), settings).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + // 1 out of 2 keys ie 50% are now stale. 
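+ // staleness exactly equal to the configured threshold should still trigger cleanup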
+ reader.close(); + // cache count should not be affected + assertEquals(2, cache.count()); + + // clean cache with 50% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should have taken effect + assertEquals(1, cache.count()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + + public void testStaleCount_OnRemovalNotificationOfStaleKey_DecrementsStaleCount() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), settings).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + reader.close(); + AtomicInteger staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); + // 1 out of 2 keys ie 50% are now stale. 
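+ // the cleanup manager should be tracking exactly one stale key at this point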
+ assertEquals(1, staleKeysCount.get()); + // cache count should not be affected + assertEquals(2, cache.count()); + + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = + (OpenSearchDirectoryReader.DelegatingCacheHelper) secondReader.getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndicesRequestCache.Key key = new IndicesRequestCache.Key( + ((IndexShard) secondEntity.getCacheIdentity()).shardId(), + termBytes, + readerCacheKeyId + ); + + cache.onRemoval(new RemovalNotification<IndicesRequestCache.Key, BytesReference>(key, termBytes, RemovalReason.EVICTED)); + staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); + // eviction of previous stale key from the cache should decrement staleKeysCount in iRC + assertEquals(0, staleKeysCount.get()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + + public void testStaleCount_OnRemovalNotificationOfStaleKey_DoesNotDecrementsStaleCount() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.51").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), settings).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + reader.close(); + AtomicInteger staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); + // 1 out of 2 keys ie 50% are now stale. 
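+ // as above, one stale key is tracked; evicting a different, non-stale key must leave this count untouched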
+ assertEquals(1, staleKeysCount.get()); + // cache count should not be affected + assertEquals(2, cache.count()); + + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndicesRequestCache.Key key = new IndicesRequestCache.Key( + ((IndexShard) secondEntity.getCacheIdentity()).shardId(), + termBytes, + readerCacheKeyId + ); + + cache.onRemoval(new RemovalNotification<IndicesRequestCache.Key, BytesReference>(key, termBytes, RemovalReason.EVICTED)); + staleKeysCount = cache.cacheCleanupManager.getStaleKeysCount(); + // eviction of NON-stale key from the cache should NOT decrement staleKeysCount in iRC + assertEquals(1, staleKeysCount.get()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + + public void testCacheCleanupBasedOnStaleThreshold_StalenessGreaterThanThreshold() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.49").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), settings).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + // 1 out of 2 keys ie 50% are now stale. 
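+ // 50% staleness exceeds the 49% threshold, so the next cleanup pass should evict the stale key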
+ reader.close(); + // cache count should not be affected + assertEquals(2, cache.count()); + + // clean cache with 49% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should have taken effect with 49% threshold + assertEquals(1, cache.count()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + + public void testCacheCleanupBasedOnStaleThreshold_StalenessLesserThanThreshold() throws Exception { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "51%").build(); + IndicesRequestCache cache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), threadPool); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + if (randomBoolean()) { + writer.flush(); + IOUtils.close(writer); + writer = new IndexWriter(dir, newIndexWriterConfig()); + } + writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + cache.getOrCompute(entity, loader, reader, termBytes); + + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(entity, loader, secondReader, termBytes); + + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(secondReader, 0); + cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + // 1 out of 2 keys ie 50% are now stale. 
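+ // 50% staleness is below the 51% threshold, so the next cleanup pass should be a no-op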
+ reader.close(); + // cache count should not be affected + assertEquals(2, cache.count()); + + // clean cache with 51% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should have been ignored + assertEquals(2, cache.count()); + + IOUtils.close(secondReader, writer, dir, cache); + terminate(threadPool); + } + public void testEviction() throws Exception { final ByteSizeValue size; { IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache( Settings.EMPTY, - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), + threadPool ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -270,11 +772,15 @@ public void testEviction() throws Exception { assertEquals("bar", value2.streamInput().readString()); size = indexShard.requestCache().stats().getMemorySize(); IOUtils.close(reader, secondReader, writer, dir, cache); + terminate(threadPool); } IndexShard indexShard = createIndex("test1").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache( Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 1 + "b").build(), - (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), + threadPool ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -306,11 +812,13 @@ public void testEviction() throws Exception { assertEquals(2, cache.count()); assertEquals(1, indexShard.requestCache().stats().getEvictions()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); + terminate(threadPool); } public void testClearAllEntityIdentity() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { IndexService indexService = null; try { @@ -319,7 +827,7 @@ public void testClearAllEntityIdentity() throws Exception { return Optional.empty(); } return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - })); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), threadPool); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -354,7 +862,7 @@ public void testClearAllEntityIdentity() throws Exception { final long hitCount = requestCacheStats.getHitCount(); // clear all for the indexShard Idendity even though is't still open cache.clear(randomFrom(entity, secondEntity)); - cache.cleanCache(); + cache.cacheCleanupManager.cleanCache(); assertEquals(1, cache.count()); // third has not been validated since it's a different identity value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); @@ -364,7 +872,7 @@ public void testClearAllEntityIdentity() throws Exception { assertEquals("baz", value3.streamInput().readString()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); - + 
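// shut down the thread pool created for this test's cache +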
terminate(threadPool); } public Iterable<Field> newDoc(int id, String value) { @@ -406,6 +914,7 @@ public BytesReference get() { public void testInvalidate() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexShard indexShard = createIndex("test").getShard(0); + ThreadPool threadPool = getThreadPool(); IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { IndexService indexService = null; try { @@ -414,7 +923,7 @@ public void testInvalidate() throws Exception { return Optional.empty(); } return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); - })); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(), threadPool); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -471,7 +980,7 @@ public void testInvalidate() throws Exception { indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(entity); } - cache.cleanCache(); + cache.cacheCleanupManager.cleanCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -479,6 +988,7 @@ public void testInvalidate() throws Exception { assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); IOUtils.close(reader, writer, dir, cache); + terminate(threadPool); assertEquals(0, cache.numRegisteredCloseListeners()); } diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index e4dd32e5c6f70..2531790ede4af 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -15,7 +15,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; @@ -27,12 +26,9 @@ import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; -import org.junit.Assert; import java.util.Arrays; import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -165,40 +161,6 @@ public void testTransportTimeoutForGetSegmentFilesAction() { assertEquals(recoverySettings.internalActionLongTimeout(), capturedRequest.options.timeout()); } - public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { - CountDownLatch latch = new CountDownLatch(1); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); - replicationSource.getSegmentFiles( - REPLICATION_ID, - checkpoint, - Arrays.asList(testMetadata), - mock(IndexShard.class), - (fileName, bytesRecovered) -> {}, - new ActionListener<>() { - @Override - public void 
onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - Assert.fail("onFailure response expected."); - } - - @Override - public void onFailure(Exception e) { - assertEquals(e.getClass(), CancellableThreads.ExecutionCancelledException.class); - latch.countDown(); - } - } - ); - replicationSource.cancel(); - latch.await(2, TimeUnit.SECONDS); - assertEquals("listener should have resolved in a failure", 0, latch.getCount()); - } - private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f284a425a417b..3c72dda2d8b5d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -620,6 +620,7 @@ public void testForceSegmentSyncHandlerWithFailure_AlreadyClosedException_swallo } public void testTargetCancelledBeforeStartInvoked() { + final String cancelReason = "test"; final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, primaryShard.getLatestReplicationCheckpoint(), @@ -633,12 +634,12 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { // failures leave state object in last entered stage. - assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); - assertTrue(e.getCause() instanceof CancellableThreads.ExecutionCancelledException); + assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + assertEquals(cancelReason, e.getMessage()); } } ); - target.cancel("test"); + target.cancel(cancelReason); sut.startReplication(target); } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java index a42c302b516d2..48b2941fe3b7e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java @@ -185,7 +185,7 @@ public void testLoggingOnHungIO() throws Exception { } public void testFailsHealthOnHungIOBeyondHealthyTimeout() throws Exception { - long healthyTimeoutThreshold = randomLongBetween(500, 1000); + long healthyTimeoutThreshold = randomLongBetween(1500, 2000); long refreshInterval = randomLongBetween(500, 1000); long slowLogThreshold = randomLongBetween(100, 200); long delayBetweenChecks = 100; diff --git a/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java b/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java new file mode 100644 index 0000000000000..4a4de44e3acea --- /dev/null +++ b/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Locale; + +public class IoUsageStatsTests extends OpenSearchTestCase { + IoUsageStats ioUsageStats; + + @Override + public void setUp() throws Exception { + super.setUp(); + ioUsageStats = new IoUsageStats(10); + } + + public void testDefaultsIoUsageStats() { + assertEquals(ioUsageStats.getIoUtilisationPercent(), 10.0, 0); + } + + public void testUpdateIoUsageStats() { + assertEquals(ioUsageStats.getIoUtilisationPercent(), 10.0, 0); + ioUsageStats.setIoUtilisationPercent(20); + assertEquals(ioUsageStats.getIoUtilisationPercent(), 20.0, 0); + } + + public void testIoUsageStats() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = ioUsageStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + String response = builder.toString(); + assertEquals(response, "{\"max_io_utilization_percent\":\"10.0\"}"); + ioUsageStats.setIoUtilisationPercent(20); + builder = JsonXContent.contentBuilder(); + builder = ioUsageStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + response = builder.toString(); + assertEquals(response, "{\"max_io_utilization_percent\":\"20.0\"}"); + } + + public void testIoUsageStatsToString() { + String expected = "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", 10.0); + assertEquals(expected, ioUsageStats.toString()); + ioUsageStats.setIoUtilisationPercent(20); + expected = "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", 20.0); + assertEquals(expected, ioUsageStats.toString()); + } +} diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java index b2fa884afab69..6dd90784ab65f 100644 --- a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -14,24 +14,21 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.transport.TransportAddress; -import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import org.opensearch.test.OpenSearchSingleNodeTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; import org.junit.After; -import org.junit.Before; import java.util.Map; import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThan; /** @@ -39,58 +36,50 @@ * are working as expected */ public class ResourceUsageCollectorServiceTests 
extends OpenSearchSingleNodeTestCase { + @Override + protected boolean resetNodeAfterTest() { + return true; + } - private ClusterService clusterService; - private ResourceUsageCollectorService collector; - private ThreadPool threadpool; - NodeResourceUsageTracker tracker; - - @Before - public void setUp() throws Exception { - super.setUp(); - - threadpool = new TestThreadPool("resource_usage_collector_tests"); - - clusterService = createClusterService(threadpool); - - Settings settings = Settings.builder() - .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(5000)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) .build(); - tracker = new NodeResourceUsageTracker( - threadpool, - settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - ); - collector = new ResourceUsageCollectorService(tracker, clusterService, threadpool); - tracker.start(); - collector.start(); } @After - public void tearDown() throws Exception { - super.tearDown(); - threadpool.shutdownNow(); - clusterService.close(); - collector.stop(); - tracker.stop(); - collector.close(); - tracker.close(); + public void cleanup() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); } public void testResourceUsageStats() { - collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99); - Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + resourceUsageCollectorService.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); assertTrue(nodeStats.containsKey("node1")); assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0); assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0); + assertEquals(98, nodeStats.get("node1").getIoUsageStats().getIoUtilisationPercent(), 0.0); - Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = collector.getNodeStatistics("node1"); + Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = resourceUsageCollectorService.getNodeStatistics("node1"); assertNotNull(nodeResourceUsageStatsOptional.get()); assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0); assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0); + assertEquals(98, nodeResourceUsageStatsOptional.get().getIoUsageStats().getIoUtilisationPercent(), 0.0); - nodeResourceUsageStatsOptional = collector.getNodeStatistics("node2"); + nodeResourceUsageStatsOptional = resourceUsageCollectorService.getNodeStatistics("node2"); assertTrue(nodeResourceUsageStatsOptional.isEmpty()); } @@ -98,26 +87,29 @@ 
public void testScheduler() throws Exception { /** * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map */ - assertBusy(() -> assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()), 1, TimeUnit.MINUTES); - assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()); + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + assertBusy(() -> assertEquals(1, resourceUsageCollectorService.getAllNodeStatistics().size())); + /** * Wait for memory utilization to be reported greater than 0 */ assertBusy( () -> assertThat( - collector.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), + resourceUsageCollectorService.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), greaterThan(0.0) ), 5, TimeUnit.SECONDS ); - assertTrue(collector.getNodeStatistics("Invalid").isEmpty()); + assertTrue(resourceUsageCollectorService.getNodeStatistics("Invalid").isEmpty()); } /* * Test that concurrently adding values and removing nodes does not cause exceptions */ public void testConcurrentAddingAndRemovingNodes() throws Exception { + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); String[] nodes = new String[] { "a", "b", "c", "d" }; final CountDownLatch latch = new CountDownLatch(5); @@ -131,13 +123,14 @@ public void testConcurrentAddingAndRemovingNodes() throws Exception { } for (int i = 0; i < randomIntBetween(100, 200); i++) { if (randomBoolean()) { - collector.removeNodeResourceUsageStats(randomFrom(nodes)); + resourceUsageCollectorService.removeNodeResourceUsageStats(randomFrom(nodes)); } - collector.collectNodeResourceUsageStats( + resourceUsageCollectorService.collectNodeResourceUsageStats( randomFrom(nodes), System.currentTimeMillis(), randomIntBetween(1, 100), - randomIntBetween(1, 100) + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) ); } }; @@ -157,18 +150,32 @@ public void testConcurrentAddingAndRemovingNodes() throws Exception { t3.join(); t4.join(); - final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + final Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); for (String nodeId : nodes) { if (nodeStats.containsKey(nodeId)) { assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0)); assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).getIoUsageStats().getIoUtilisationPercent(), greaterThan(0.0)); } } } public void testNodeRemoval() { - collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); - collector.collectNodeResourceUsageStats("node2", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + resourceUsageCollectorService.collectNodeResourceUsageStats( + "node1", + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) + ); + resourceUsageCollectorService.collectNodeResourceUsageStats( + "node2", + 
System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) + ); ClusterState previousState = ClusterState.builder(new ClusterName("cluster")) .nodes( @@ -182,8 +189,8 @@ public void testNodeRemoval() { .build(); ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState); - collector.clusterChanged(event); - final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + resourceUsageCollectorService.clusterChanged(event); + final Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); assertTrue(nodeStats.containsKey("node1")); assertFalse(nodeStats.containsKey("node2")); } diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java index 374c993a264d4..49a58991e8e5c 100644 --- a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java +++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java @@ -8,15 +8,22 @@ package org.opensearch.node.resource.tracker; +import org.opensearch.common.ValidationException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; +import java.util.Optional; import java.util.concurrent.TimeUnit; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation */ @@ -24,10 +31,12 @@ public class AverageUsageTrackerTests extends OpenSearchTestCase { ThreadPool threadPool; AverageMemoryUsageTracker averageMemoryUsageTracker; AverageCpuUsageTracker averageCpuUsageTracker; + AverageIoUsageTracker averageIoUsageTracker; @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); + FsService fsService = mock(FsService.class); averageMemoryUsageTracker = new AverageMemoryUsageTracker( threadPool, new TimeValue(500, TimeUnit.MILLISECONDS), @@ -38,6 +47,12 @@ public void setup() { new TimeValue(500, TimeUnit.MILLISECONDS), new TimeValue(1000, TimeUnit.MILLISECONDS) ); + averageIoUsageTracker = new AverageIoUsageTracker( + fsService, + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); } @After @@ -46,14 +61,15 @@ public void cleanup() { } public void testBasicUsage() { - assertAverageUsageStats(averageMemoryUsageTracker); assertAverageUsageStats(averageCpuUsageTracker); + assertAverageUsageStats(averageIoUsageTracker); } public void testUpdateWindowSize() { assertUpdateWindowSize(averageMemoryUsageTracker); assertUpdateWindowSize(averageCpuUsageTracker); + assertUpdateWindowSize(averageIoUsageTracker); } private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { @@ -96,4 +112,24 @@ private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { // ( 2 + 1 + 2 + 2 ) / 4 = 1.75 assertEquals(1.75, usageTracker.getAverage(), 0.0); } + + public void testPreValidationForIOTracker() { + Optional<ValidationException> validationException = averageIoUsageTracker.preValidateFsStats(); + assertTrue(validationException.isPresent()); + 
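// the bare tracker above has no FsService stats stubbed, so pre-validation is expected to fail; now stub an FsService whose IoStats reports an empty device list and expect validation to pass +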
FsService fsService = mock(FsService.class); + FsInfo fsInfo = mock(FsInfo.class); + FsInfo.IoStats ioStats = mock(FsInfo.IoStats.class); + when(fsService.stats()).thenReturn(fsInfo); + when(fsInfo.getIoStats()).thenReturn(ioStats); + FsInfo.DeviceStats[] deviceStats = new FsInfo.DeviceStats[0]; + when(fsService.stats().getIoStats().getDevicesStats()).thenReturn(deviceStats); + averageIoUsageTracker = new AverageIoUsageTracker( + fsService, + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + validationException = averageIoUsageTracker.preValidateFsStats(); + assertFalse(validationException.isPresent()); + } } diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java index 1ce68b9f29062..191b09331f111 100644 --- a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -22,6 +23,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.mock; /** * Tests to assert resource usage trackers retrieving resource utilization averages @@ -51,6 +53,7 @@ public void testStats() throws Exception { .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) .build(); NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + mock(FsService.class), threadPool, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -67,6 +70,7 @@ public void testStats() throws Exception { public void testUpdateSettings() { NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + mock(FsService.class), threadPool, Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -74,6 +78,7 @@ public void testUpdateSettings() { assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getIoWindowDuration().getSeconds(), 120); Settings settings = Settings.builder() .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") @@ -92,5 +97,13 @@ public void testUpdateSettings() { "5s", response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) ); + Settings ioSettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "20s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(ioSettings).get(); + assertEquals( + "20s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); } } diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java 
b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index b976704e8af57..12c7dc870c104 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -32,10 +32,16 @@ package org.opensearch.plugins; +import com.fasterxml.jackson.core.JsonParseException; + import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -74,6 +80,33 @@ public void testReadFromProperties() throws Exception { assertEquals("fake desc", info.getDescription()); assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); + assertThat(info.getExtendedPlugins(), empty()); + } + + public void testReadFromPropertiesWithSingleOpenSearchRange() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertEquals("my_plugin", info.getName()); + assertEquals("fake desc", info.getDescription()); + assertEquals("1.0", info.getVersion()); + assertEquals("FakePlugin", info.getClassname()); + assertEquals("~" + Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -102,6 +135,7 @@ public void testReadFromPropertiesWithFolderNameAndVersionAfter() throws Excepti assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); assertEquals("custom-folder", info.getTargetFolderName()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -130,11 +164,40 @@ public void testReadFromPropertiesVersionMissing() throws Exception { assertThat(e.getMessage(), containsString("[version] is missing")); } - public void testReadFromPropertiesOpenSearchVersionMissing() throws Exception { + public void testReadFromPropertiesOpenSearchVersionAndDependenciesMissing() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc", "name", "my_plugin", "version", "1.0"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("[opensearch.version] is missing")); + assertThat( + e.getMessage(), + containsString("Either [opensearch.version] or [dependencies] property must be specified for the plugin ") + ); + } + + public void testReadFromPropertiesWithDependenciesAndOpenSearchVersion() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + 
pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "dependencies", + "{opensearch:" + Version.CURRENT.toString() + "}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat( + e.getMessage(), + containsString("Only one of [opensearch.version] or [dependencies] property can be specified for the plugin") + ); } public void testReadFromPropertiesJavaVersionMissing() throws Exception { @@ -305,7 +368,31 @@ public void testSerialize() throws Exception { ByteBufferStreamInput input = new ByteBufferStreamInput(buffer); PluginInfo info2 = new PluginInfo(input); assertThat(info2.toString(), equalTo(info.toString())); + } + public void testToXContent() throws Exception { + PluginInfo info = new PluginInfo( + "fake", + "foo", + "dummy", + Version.CURRENT, + "1.8", + "dummyClass", + "folder", + Collections.emptyList(), + false + ); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + String prettyPrint = info.toXContent(builder, ToXContent.EMPTY_PARAMS).prettyPrint().toString(); + assertTrue(prettyPrint.contains("\"name\" : \"fake\"")); + assertTrue(prettyPrint.contains("\"version\" : \"dummy\"")); + assertTrue(prettyPrint.contains("\"opensearch_version\" : \"" + Version.CURRENT)); + assertTrue(prettyPrint.contains("\"java_version\" : \"1.8\"")); + assertTrue(prettyPrint.contains("\"description\" : \"foo\"")); + assertTrue(prettyPrint.contains("\"classname\" : \"dummyClass\"")); + assertTrue(prettyPrint.contains("\"custom_foldername\" : \"folder\"")); + assertTrue(prettyPrint.contains("\"extended_plugins\" : [ ]")); + assertTrue(prettyPrint.contains("\"has_native_controller\" : false")); } public void testPluginListSorted() { @@ -347,4 +434,193 @@ public void testUnknownProperties() throws Exception { assertThat(e.getMessage(), containsString("Unknown properties in plugin descriptor")); } + public void testMultipleDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\", dependency2:\"1.0.0\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testNonOpenSearchDependency() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{some_dependency:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Only opensearch is allowed to be specified as a plugin dependency")); + } + + public void 
testEmptyDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testInvalidDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{invalid}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + expectThrows(JsonParseException.class, () -> PluginInfo.readFromProperties(pluginDir)); + } + + public void testEmptyOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Version cannot be empty")); + } + + public void testInvalidOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"1.2\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat( + e.getMessage(), + containsString("the version needs to contain major, minor, and revision, and optionally the build: 1.2") + ); + } + + public void testInvalidRangeInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"<2.2.0\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + expectThrows(NumberFormatException.class, () -> PluginInfo.readFromProperties(pluginDir)); + } + + public void testMultipleOpenSearchRangesInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~1.2.3, =1.2.3\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly
one range is allowed to be specified in dependencies for the plugin")); + } + + public void testMultipleOpenSearchRangesInConstructor() throws Exception { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new PluginInfo( + "plugin_name", + "foo", + "dummy", + List.of( + new SemverRange(Version.CURRENT, SemverRange.RangeOperator.EQ), + new SemverRange(Version.CURRENT, SemverRange.RangeOperator.DEFAULT) + ), + "1.8", + "dummyclass", + null, + Collections.emptyList(), + randomBoolean() + ) + ); + assertThat(e.getMessage(), containsString("Exactly one range is allowed to be specified in dependencies for the plugin")); + } } diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index db276678ba4dd..bd9ee33856f14 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -45,8 +45,10 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexModule; +import org.opensearch.semver.SemverRange; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -717,6 +719,45 @@ public void testIncompatibleOpenSearchVersion() throws Exception { assertThat(e.getMessage(), containsString("was built for OpenSearch version 6.0.0")); } + public void testCompatibleOpenSearchVersionRange() { + List<SemverRange> pluginCompatibilityRange = List.of(new SemverRange(Version.CURRENT, SemverRange.RangeOperator.TILDE)); + PluginInfo info = new PluginInfo( + "my_plugin", + "desc", + "1.0", + pluginCompatibilityRange, + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + PluginsService.verifyCompatibility(info); + } + + public void testIncompatibleOpenSearchVersionRange() { + // Version.CURRENT is one patch version behind the version targeted by the range + List<SemverRange> pluginCompatibilityRange = List.of( + new SemverRange( + VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)), + SemverRange.RangeOperator.TILDE + ) + ); + PluginInfo info = new PluginInfo( + "my_plugin", + "desc", + "1.0", + pluginCompatibilityRange, + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("was built for OpenSearch version ")); + } + public void testIncompatibleJavaVersion() throws Exception { PluginInfo info = new PluginInfo( "my_plugin", @@ -891,7 +932,10 @@ public void testExtensiblePlugin() { TestExtensiblePlugin extensiblePlugin = new TestExtensiblePlugin(); PluginsService.loadExtensions( Collections.singletonList( - Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin) + Tuple.tuple( + new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false), + extensiblePlugin + ) ) ); @@ -902,9 +946,12 @@ public void testExtensiblePlugin() { TestPlugin testPlugin = new TestPlugin(); PluginsService.loadExtensions( Arrays.asList( - Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin),
Tuple.tuple( - new PluginInfo("test", null, null, null, null, null, Collections.singletonList("extensible"), false), + new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false), + extensiblePlugin + ), + Tuple.tuple( + new PluginInfo("test", null, null, Version.CURRENT, null, null, Collections.singletonList("extensible"), false), testPlugin ) ) @@ -1036,6 +1083,40 @@ public void testThrowingConstructor() { assertThat(e.getCause().getCause(), hasToString(containsString("test constructor failure"))); } + public void testPluginCompatibilityWithSemverRange() { + // Compatible plugin and core versions + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.2"))); + + // Incompatible plugin and core versions + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.1"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.1"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.0"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.1.0"))); + } + + private PluginInfo getPluginInfoWithSemverRange(String semverRange) { + return new PluginInfo( + "my_plugin", + "desc", + "1.0", + List.of(SemverRange.fromString(semverRange)), + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + } + private static class TestExtensiblePlugin extends Plugin implements ExtensiblePlugin { private List<TestExtensionPoint> extensions; diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java index 7a67ffc8c7c5d..4f615290f1805 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -8,6 +8,7 @@ package org.opensearch.ratelimitting.admissioncontrol; +import org.apache.lucene.util.Constants; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -48,13 +49,21 @@ public void tearDown() throws Exception { public void testWhenAdmissionControllerRegistered() { admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); - assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + if (Constants.LINUX) { + assertEquals(admissionControlService.getAdmissionControllers().size(), 2); + } else { + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } } public void testRegisterInvalidAdmissionController() { String test = "TEST"; admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool,
null); - assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + if (Constants.LINUX) { + assertEquals(admissionControlService.getAdmissionControllers().size(), 2); + } else { + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, () -> admissionControlService.registerAdmissionController(test) @@ -66,7 +75,11 @@ public void testAdmissionControllerSettings() { admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); AdmissionControlSettings admissionControlSettings = admissionControlService.admissionControlSettings; List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); - assertEquals(admissionControllerList.size(), 1); + if (Constants.LINUX) { + assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } CpuBasedAdmissionController cpuBasedAdmissionController = (CpuBasedAdmissionController) admissionControlService .getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); assertEquals( @@ -132,7 +145,11 @@ public void testApplyAdmissionControllerEnabled() { .build(); clusterService.getClusterSettings().applySettings(settings); List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); - assertEquals(admissionControllerList.size(), 1); + if (Constants.LINUX) { + assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } } public void testApplyAdmissionControllerEnforced() { @@ -153,6 +170,10 @@ public void testApplyAdmissionControllerEnforced() { .build(); clusterService.getClusterSettings().applySettings(settings); List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); - assertEquals(admissionControllerList.size(), 1); + if (Constants.LINUX) { + assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java index a1694b2c3cee2..5534dbcf2774b 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java @@ -8,6 +8,7 @@ package org.opensearch.ratelimitting.admissioncontrol; +import org.apache.lucene.util.Constants; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -21,15 +22,24 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.node.ResourceUsageCollectorService; import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; import 
org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.junit.After; +import java.util.HashMap; +import java.util.Map; + import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE; import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; @@ -38,6 +48,8 @@ */ public class AdmissionControlSingleNodeTests extends OpenSearchSingleNodeTestCase { + public static final String INDEX_NAME = "test_index"; + @Override protected boolean resetNodeAfterTest() { return true; @@ -45,6 +57,7 @@ protected boolean resetNodeAfterTest() { @After public void cleanup() { + client().admin().indices().prepareDelete(INDEX_NAME).get(); assertAcked( client().admin() .cluster() @@ -60,7 +73,8 @@ protected Settings nodeSettings() { .put(super.nodeSettings()) .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) - .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(5000)) + .put(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) .build(); @@ -69,11 +83,10 @@ protected Settings nodeSettings() { public void testAdmissionControlRejectionEnforcedMode() throws Exception { ensureGreen(); assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); - // Thread.sleep(700); - client().admin().indices().prepareCreate("index").execute().actionGet(); + client().admin().indices().prepareCreate(INDEX_NAME).execute().actionGet(); BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < 3; i++) { - bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); } // Verify that cluster state is updated ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); @@ -83,24 +96,116 @@ public void testAdmissionControlRejectionEnforcedMode() throws Exception { BulkResponse res = client().bulk(bulk.request()).actionGet(); assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); AdmissionControlService admissionControlService = 
getInstanceFromNode(AdmissionControlService.class); - AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); - client().admin().indices().prepareRefresh("index").get(); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 0, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); // verify search request hits 429 - SearchRequest searchRequest = new SearchRequest("index"); + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); try { client().search(searchRequest).actionGet(); } catch (Exception e) { assertTrue(((SearchPhaseExecutionException) e).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); } - acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 0, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 0) + .put(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .put(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + res = client().bulk(bulk.request()).actionGet(); + if (Constants.LINUX) { + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + } + admissionControlService = getInstanceFromNode(AdmissionControlService.class); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); 
+ } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify search request hits 429 + searchRequest = new SearchRequest(INDEX_NAME); + try { + client().search(searchRequest).actionGet(); + } catch (Exception e) { + assertTrue(((SearchPhaseExecutionException) e).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + } + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } } public void testAdmissionControlRejectionMonitorOnlyMode() throws Exception { assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); - // Verify that cluster state is updated ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); assertThat(future2.isDone(), is(true)); @@ -108,66 +213,165 @@ public void testAdmissionControlRejectionMonitorOnlyMode() throws Exception { updateSettingsRequest.transientSettings( Settings.builder() .put(super.nodeSettings()) - .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + .put(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) ); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < 3; i++) { - bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); } // verify bulk request success but admission control having rejections stats BulkResponse res = client().bulk(bulk.request()).actionGet(); assertFalse(res.hasFailures()); AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); - AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(1, (long) acStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); - client().admin().indices().prepareRefresh("index").get(); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + client().admin().indices().prepareRefresh(INDEX_NAME).get(); // verify search request success but admission control having rejections stats - SearchRequest searchRequest = new SearchRequest("index"); + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(3, searchResponse.getHits().getHits().length); - acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(1, (long) 
acStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + + updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 0) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + .put(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .put(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify bulk request success but admission control having rejections stats + res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + searchRequest = new SearchRequest(INDEX_NAME); + searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } } public void testAdmissionControlRejectionDisabledMode() throws Exception { assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); - // Verify that cluster state is updated ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); assertThat(future2.isDone(), is(true)); ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); updateSettingsRequest.transientSettings( - Settings.builder().put(super.nodeSettings()).put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED) + Settings.builder() + .put(super.nodeSettings()) + .put(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) ); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); BulkRequestBuilder bulk = 
client().prepareBulk(); for (int i = 0; i < 3; i++) { - bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); } // verify bulk request success and no rejections BulkResponse res = client().bulk(bulk.request()).actionGet(); assertFalse(res.hasFailures()); AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); - AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(0, acStats.getRejectionCount().size()); - client().admin().indices().prepareRefresh("index").get(); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + client().admin().indices().prepareRefresh(INDEX_NAME).get(); // verify search request success and no rejections - SearchRequest searchRequest = new SearchRequest("index"); + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(3, searchResponse.getHits().getHits().length); - acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(0, acStats.getRejectionCount().size()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify bulk request success and no rejections recorded while both controllers are disabled + res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + searchRequest = new SearchRequest(INDEX_NAME); + searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } } public void testAdmissionControlWithinLimits() throws Exception { assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); - // Verify that cluster state is updated
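+ // sanity check that the cluster state request below has already completed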
ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); assertThat(future2.isDone(), is(true)); @@ -175,29 +379,49 @@ public void testAdmissionControlWithinLimits() throws Exception { updateSettingsRequest.transientSettings( Settings.builder() .put(super.nodeSettings()) - .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 101) ); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < 3; i++) { - bulk.add(client().prepareIndex("index").setSource("foo", "bar " + i)); + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); } // verify bulk request success and no rejections BulkResponse res = client().bulk(bulk.request()).actionGet(); assertFalse(res.hasFailures()); AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); - AdmissionControllerStats acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(0, acStats.getRejectionCount().size()); - client().admin().indices().prepareRefresh("index").get(); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); // verify search request success and no rejections - SearchRequest searchRequest = new SearchRequest("index"); + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(3, searchResponse.getHits().getHits().length); - acStats = admissionControlService.stats().getAdmissionControllerStatsList().get(0); - assertEquals(0, acStats.getRejectionCount().size()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + } + + Map<String, AdmissionControllerStats> getAdmissionControlStats(AdmissionControlService admissionControlService) { + Map<String, AdmissionControllerStats> acStats = new HashMap<>(); + for (AdmissionControllerStats admissionControllerStats : admissionControlService.stats().getAdmissionControllerStatsList()) { + acStats.put(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats); + } + return acStats; } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java 
b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..c5a2208f49ce6 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import org.mockito.Mockito; + +public class IoBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + IoBasedAdmissionController admissionController = null; + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getName(), IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals(admissionController.getName(), IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + 
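+        // applySettings publishes the new mode through the dynamic cluster settings consumer, so the controller created with Settings.EMPTY now reports ENFORCED without being rebuilt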
assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action, AdmissionControlActionType.INDEXING); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testApplyControllerWhenSettingsEnabled() throws Exception { + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertTrue( + admissionController.isAdmissionControllerEnforced(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testRejectionCount() { + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 3); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 1); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 3); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 5); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java similarity index 98% rename from server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java rename to 
server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java index 11688e2f30d4b..9ce28bc7fdb40 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControlSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -20,7 +20,7 @@ import java.util.Arrays; import java.util.Set; -public class CPUBasedAdmissionControlSettingsTests extends OpenSearchTestCase { +public class CPUBasedAdmissionControllerSettingsTests extends OpenSearchTestCase { private ClusterService clusterService; private ThreadPool threadPool; diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java new file mode 100644 index 0000000000000..ff777c175ec0e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Set; + +public class IoBasedAdmissionControllerSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("io_based_admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set<Setting<?>> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the IO based admission controller settings should be supported built in settings", + settings.containsAll( + Arrays.asList( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, + IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT + ) + ) + ); + } + + public void testDefaultSettings() { + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + long percent = 95; + 
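+        // 95 is the default percent expected for both the search and indexing IO usage limits asserted below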
assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + } + + public void testGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + } + + public void testUpdateAfterGetDefaultSettings() { + long percent = 95; + long searchPercent = 80; + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + clusterService.getClusterSettings().applySettings(settings); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + } + + public void testUpdateAfterGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + long searchPercent = 80; + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + + Settings updatedSettings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + 
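+        // updatedSettings changes only the mode and the indexing limit; the search limit applied earlier should carry over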
assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + + searchPercent = 70; + updatedSettings = Settings.builder() + .put(updatedSettings) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + } +} diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 9c65ad32fa6a6..2445cad01574c 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -42,6 +42,8 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; @@ -64,6 +66,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; @@ -318,4 +321,31 @@ private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boo return repoData; } + private String getShardIdentifier(String indexUUID, String shardId) { + return String.join("/", indexUUID, shardId); + } + + public void testRemoteStoreShardCleanupTask() { + AtomicBoolean executed1 = new AtomicBoolean(false); + Runnable task1 = () -> executed1.set(true); + String indexName = "test-idx"; + String testIndexUUID = "test-idx-uuid"; + ShardId shardId = new ShardId(new Index(indexName, testIndexUUID), 0); + + // just adding random shards in ongoing cleanups. + RemoteStoreShardCleanupTask.ongoingRemoteDirectoryCleanups.add(getShardIdentifier(testIndexUUID, "1")); + RemoteStoreShardCleanupTask.ongoingRemoteDirectoryCleanups.add(getShardIdentifier(testIndexUUID, "2")); + + // Scenario 1: ongoing = false => executed + RemoteStoreShardCleanupTask remoteStoreShardCleanupTask = new RemoteStoreShardCleanupTask(task1, testIndexUUID, shardId); + remoteStoreShardCleanupTask.run(); + assertTrue(executed1.get()); + + // Scenario 2: ongoing = true => currentTask skipped. 
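+        // registering "test-idx-uuid/0" marks this exact shard as having a cleanup in flight, so the next run() must skip task1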
+ executed1.set(false); + RemoteStoreShardCleanupTask.ongoingRemoteDirectoryCleanups.add(getShardIdentifier(testIndexUUID, "0")); + remoteStoreShardCleanupTask = new RemoteStoreShardCleanupTask(task1, testIndexUUID, shardId); + remoteStoreShardCleanupTask.run(); + assertFalse(executed1.get()); + } } diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index 25405afa24c16..b7239e7b59742 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -138,6 +138,37 @@ public void teardown() throws IOException { IOUtils.close(client); } + public void testDefaultRestControllerGetAllHandlersContainsFavicon() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + assertFalse(handlers.hasNext()); + } + + public void testRestControllerGetAllHandlers() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + + restController.registerHandler(RestRequest.Method.PATCH, "/foo", mock(RestHandler.class)); + restController.registerHandler(RestRequest.Method.GET, "/foo", mock(RestHandler.class)); + + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + + assertTrue(handlers.hasNext()); + MethodHandlers rootHandler = handlers.next(); + assertEquals(rootHandler.getPath(), "/foo"); + assertEquals(rootHandler.getValidMethods(), Set.of(RestRequest.Method.GET, RestRequest.Method.PATCH)); + + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + + assertFalse(handlers.hasNext()); + } + public void testApplyRelevantHeaders() throws Exception { final ThreadContext threadContext = client.threadPool().getThreadContext(); Set<RestHeaderDefinition> headers = new HashSet<>( @@ -150,15 +181,15 @@ public void testApplyRelevantHeaders() throws Exception { restHeaders.put("header.3", Collections.singletonList("false")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestController spyRestController = spy(restController); - when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<MethodHandlers>() { + when(spyRestController.getAllRestMethodHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<RestMethodHandlers>() { @Override public boolean hasNext() { return false; } @Override - public MethodHandlers next() { - return new MethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { + public RestMethodHandlers next() { + return new RestMethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { assertEquals("true", threadContext.getHeader("header.1")); assertEquals("true", threadContext.getHeader("header.2")); assertNull(threadContext.getHeader("header.3")); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java 
b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index 73f83a5642bb4..fa13ec2036797 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -125,7 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); - assertThat(headers.get(74).value, equalTo("docs.deleted")); + assertThat(headers.get(78).value, equalTo("docs.deleted")); final List<List<Table.Cell>> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -141,9 +141,9 @@ public void testBuildTable() { assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); - assertThat(row.get(74).value, equalTo(shardStats.getStats().getDocs().getDeleted())); + assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 347011af98c6d..3793249d569f0 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -54,7 +54,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -80,7 +79,6 @@ import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.slice.SliceBuilder; import org.opensearch.search.sort.SortAndFormats; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -554,8 +552,6 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } public void testSearchPathEvaluationUsingSortField() throws Exception { - // enable the concurrent set FeatureFlag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); diff --git a/server/src/test/java/org/opensearch/search/SearchHitTests.java b/server/src/test/java/org/opensearch/search/SearchHitTests.java index 88d5fb38a6cb1..13b4d9f976ed5 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitTests.java @@ -56,11 +56,13 @@ import org.opensearch.test.AbstractWireSerializingTestCase; import org.opensearch.test.RandomObjects; import org.opensearch.test.VersionUtils; +import org.junit.Assert; import 
java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; @@ -76,6 +78,25 @@ import static org.hamcrest.Matchers.nullValue; public class SearchHitTests extends AbstractWireSerializingTestCase<SearchHit> { + + private Map<String, Float> getSampleMatchedQueries() { + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + matchedQueries.put("query1", 1.0f); + matchedQueries.put("query2", 0.5f); + return matchedQueries; + } + + public static SearchHit createTestItemWithMatchedQueriesScores(boolean withOptionalInnerHits, boolean withShardTarget) { + var searchHit = createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); + int size = randomIntBetween(1, 5); // Ensure at least one matched query + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); + for (int i = 0; i < size; i++) { + matchedQueries.put(randomAlphaOfLength(5), randomFloat()); + } + searchHit.matchedQueriesWithScores(matchedQueries); + return searchHit; + } + public static SearchHit createTestItem(boolean withOptionalInnerHits, boolean withShardTarget) { return createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); } @@ -129,11 +150,11 @@ public static SearchHit createTestItem(final MediaType mediaType, boolean withOp } if (randomBoolean()) { int size = randomIntBetween(0, 5); - String[] matchedQueries = new String[size]; + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = randomAlphaOfLength(5); + matchedQueries.put(randomAlphaOfLength(5), Float.NaN); } - hit.matchedQueries(matchedQueries); + hit.matchedQueriesWithScores(matchedQueries); } if (randomBoolean()) { hit.explanation(createExplanation(randomIntBetween(0, 5))); @@ -219,6 +240,21 @@ public void testFromXContentLenientParsing() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); } + public void testSerializationDeserializationWithMatchedQueriesScores() throws IOException { + SearchHit searchHit = createTestItemWithMatchedQueriesScores(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_3_0_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + } + + public void testSerializationDeserializationWithMatchedQueriesList() throws IOException { + SearchHit searchHit = createTestItem(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_2_12_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + Assert.assertArrayEquals(searchHit.getMatchedQueries(), deserializedSearchHit.getMatchedQueries()); + } + /** * When e.g. with "stored_fields": "_none_", only "_index" and "_score" are returned. */ @@ -244,6 +280,125 @@ public void testToXContent() throws IOException { assertEquals("{\"_id\":\"id1\",\"_score\":1.5}", builder.toString()); } + public void testSerializeShardTargetWithNewVersion() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } + + public void testSerializeShardTargetWithNewVersionAndMatchedQueries() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + innerHit1.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + innerHit2.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + innerHit3.matchedQueriesWithScores(getSampleMatchedQueries()); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + String[] expectedMatchedQueries = new String[] { "query1", "query2" }; + String[] actualMatchedQueries = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueries(); + assertArrayEquals(expectedMatchedQueries, actualMatchedQueries); + + Map<String, Float> expectedMatchedQueriesAndScores = new LinkedHashMap<>(); + expectedMatchedQueriesAndScores.put("query1", 1.0f); + expectedMatchedQueriesAndScores.put("query2", 0.5f); + + Map<String, Float> actualMatchedQueriesAndScores = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueriesAndScores(); + assertEquals(expectedMatchedQueriesAndScores, actualMatchedQueriesAndScores); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } + public void testSerializeShardTarget() throws Exception { String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; SearchShardTarget target = new SearchShardTarget( diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 317253be9825f..01b8d6d8cdd72 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -113,7 +113,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class SearchModuleTests extends OpenSearchTestCase { @@ -431,9 +430,7 @@ public void testDefaultQueryPhaseSearcher() { } public void testConcurrentQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - SearchModule searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); TestSearchContext searchContext = new TestSearchContext(null); searchContext.setConcurrentSegmentSearchEnabled(true); QueryPhase queryPhase = searchModule.getQueryPhase(); @@ -443,8 +440,6 @@ public void testConcurrentQueryPhaseSearcher() { } public void testPluginQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); QueryPhaseSearcher queryPhaseSearcher = (searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout) -> false; SearchPlugin plugin1 = new SearchPlugin() { @Override @@ -452,7 +447,7 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { return Optional.of(queryPhaseSearcher); } }; - SearchModule searchModule = new SearchModule(settings, Collections.singletonList(plugin1)); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.singletonList(plugin1)); QueryPhase queryPhase = searchModule.getQueryPhase(); TestSearchContext searchContext = new TestSearchContext(null); assertEquals(queryPhaseSearcher, queryPhase.getQueryPhaseSearcher()); @@ -480,18 +475,10 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { } public void testIndexSearcher() { - SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); ThreadPool threadPool = mock(ThreadPool.class); - assertNull(searchModule.getIndexSearcherExecutor(threadPool)); - verify(threadPool, times(0)).executor(ThreadPool.Names.INDEX_SEARCHER); - - // enable concurrent segment search feature flag - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); searchModule.getIndexSearcherExecutor(threadPool); verify(threadPool).executor(ThreadPool.Names.INDEX_SEARCHER); - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } public void testMultiplePluginRegisterIndexSearcherProvider() { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 7c84078af080e..d502bab5918a8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ 
b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -57,7 +57,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -225,11 +224,6 @@ public void onQueryPhase(SearchContext context, long tookInNanos) { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Settings nodeSettings() { return Settings.builder().put("search.default_search_timeout", "5s").build(); @@ -1189,7 +1183,7 @@ public void testCreateSearchContext() throws IOException { public void testConcurrentSegmentSearchSearchContext() throws IOException { Boolean[][] scenarios = { // cluster setting, index setting, concurrent search enabled? - { null, null, true }, + { null, null, false }, { null, false, false }, { null, true, true }, { true, null, true }, diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java index 21d05305eed1b..eef7e4c45849d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -37,8 +37,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -52,7 +51,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.is; -public abstract class ShardSizeTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class ShardSizeTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public ShardSizeTestCase(Settings dynamicSettings) { super(dynamicSettings); @@ -66,11 +65,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected int numberOfShards() { // we need at least 2 diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index b581e552fec4f..13a3d8145743b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -35,10 +35,14 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import 
org.apache.lucene.search.TermQuery; import org.opensearch.OpenSearchParseException; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -1253,6 +1257,74 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ); } + public void testDateHistogramSourceWithSize() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45")), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00")), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24")), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).size(1); + }, + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{date=1474329600000}", result.afterKey().toString()); // 2016-09-20T00:00:00 + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + } + ); + } + + public void testDateHistogramSourceWithDocCountField() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45"), "_doc_count", 5), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00"), "_doc_count", 2), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24"), "_doc_count", 3), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); + }, + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + assertEquals("{date=1508457600000}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(6L, result.getBuckets().get(2).getDocCount()); + } + ); + } + public void testWithDateHistogram() throws 
IOException { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( @@ -2460,4 +2532,41 @@ public void testIndexSortWithDuplicate() throws Exception { ); } } + + public void testUnderFilterAggregator() throws IOException { + executeTestCase(false, false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + FilterAggregationBuilder filterAggregatorBuilder = new FilterAggregationBuilder( + "filter_mcmilterface", + new MatchAllQueryBuilder() + ); + filterAggregatorBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return filterAggregatorBuilder; + }, (ic) -> {}); + } + + public void testUnderBucketAggregator() throws IOException { + try { + executeTestCase(false, false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("terms").field("keyword"); + termsAggregationBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return termsAggregationBuilder; + }, (ic) -> {}); + fail("Should have thrown an IllegalArgumentException"); + } catch (IllegalArgumentException iae) { + assertTrue( + iae.getMessage() + .contains("[composite] aggregation cannot be used with a parent aggregation of type: [TermsAggregatorFactory]") + ); + } + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index bca6623e66104..2a4fbca7a8541 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -45,6 +46,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -1178,6 +1180,181 @@ public void testOverlappingBounds() { ); } + public void testHardBoundsNotOverlapping() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2018-01-01", "2020-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-02-03")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2017-02-03", "2020-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + false + ); + } + + public void testFilterRewriteOptimizationWithRangeQuery() throws IOException { + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2018-01-01"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true // force AGGREGABLE_DATE field to be searchable to test the filter rewrite optimization path + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-02-02")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2017-02-03"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + } + + public void testDocCountField() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(5, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + true + ); + } + public void testIllegalInterval() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -1211,13 +1388,42 @@ private void testSearchCase( int maxBucket, boolean useNanosecondResolution ) throws IOException { - boolean aggregableDateIsSearchable = randomBoolean(); + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, false); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField + ) throws IOException { + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, useDocCountField, randomBoolean()); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField, + boolean aggregableDateIsSearchable + ) throws IOException { + logger.debug("Aggregable date is searchable {}", aggregableDateIsSearchable); DateFieldMapper.DateFieldType fieldType = aggregableDateFieldType(useNanosecondResolution, aggregableDateIsSearchable); try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); + if (useDocCountField) { + // add the doc count field to the first document + document.add(new NumericDocValuesField(DocCountFieldMapper.NAME, 5)); + } for (String date : dataset) { long instant = asLong(date, fieldType); document.add(new SortedNumericDocValuesField(AGGREGABLE_DATE, instant)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java index 4229361aa7f46..753644dce81d5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; @@ -41,7 +40,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; +import org.opensearch.common.TriConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -57,6 +56,8 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { private static final String KEYWORD_FIELD = 
"keyword"; + private static final Consumer<TermsAggregationBuilder> CONFIGURE_KEYWORD_FIELD = agg -> agg.field(KEYWORD_FIELD); + private static final List<String> dataset; static { List<String> d = new ArrayList<>(45); @@ -68,51 +69,63 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { dataset = d; } + private static final Consumer<InternalMappedTerms> VERIFY_MATCH_ALL_DOCS = agg -> { + assertEquals(9, agg.getBuckets().size()); + for (int i = 0; i < 9; i++) { + StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); + assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); + assertThat(bucket.getDocCount(), equalTo(9L - i)); + } + }; + + private static final Consumer<InternalMappedTerms> VERIFY_MATCH_NO_DOCS = agg -> { assertEquals(0, agg.getBuckets().size()); }; + + private static final Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); + + private static final Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); + public void testMatchNoDocs() throws IOException { testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - null // without type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + null // without type hint ); testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - ValueType.STRING // with type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + ValueType.STRING // with type hint ); } public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - null // without type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + null // without type hint ); - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - ValueType.STRING // with type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + ValueType.STRING // with type hint ); } private void testSearchCase( + TriConsumer<Document, String, String> addField, Query query, List<String> dataset, Consumer<TermsAggregationBuilder> configure, @@ -123,7 +136,7 @@ private void testSearchCase( try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String value : dataset) { - document.add(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef(value))); + addField.apply(document, KEYWORD_FIELD, value); indexWriter.addDocument(document); document.clear(); } @@ -147,5 +160,4 @@ private void testSearchCase( } } } - } diff 
--git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 80744ecde4d69..6d105c27a692f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -44,6 +44,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -52,6 +54,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.TriConsumer; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; @@ -120,6 +123,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -136,9 +140,6 @@ import static org.mockito.Mockito.when; public class TermsAggregatorTests extends AggregatorTestCase { - - private boolean randomizeAggregatorImpl = true; - // Constants for a script that returns a string private static final String STRING_SCRIPT_NAME = "string_script"; private static final String STRING_SCRIPT_OUTPUT = "Orange"; @@ -171,9 +172,22 @@ protected ScriptService getMockScriptService() { return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } + protected CountingAggregator createCountingAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, + MappedFieldType... fieldTypes + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldTypes) + ); + } + protected <A extends Aggregator> A createAggregator( AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, MappedFieldType... fieldTypes ) throws IOException { try { @@ -188,6 +202,14 @@ protected <A extends Aggregator> A createAggregator( } } + protected <A extends Aggregator> A createAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + MappedFieldType... 
fieldTypes + ) throws IOException { + return createAggregator(aggregationBuilder, indexSearcher, true, fieldTypes); + } + @Override protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { return new TermsAggregationBuilder("foo").field(fieldName); @@ -207,8 +229,7 @@ protected List<ValuesSourceType> getSupportedValuesSourceTypes() { } public void testUsesGlobalOrdinalsByDefault() throws Exception { - randomizeAggregatorImpl = false; - + boolean randomizeAggregatorImpl = false; Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); indexWriter.close(); @@ -220,35 +241,35 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { .field("string"); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); GlobalOrdinalsStringTermsAggregator globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); // Infers depth_first because the maxOrd is 0 which is less than the size aggregationBuilder.subAggregation(AggregationBuilders.cardinality("card").field("string")); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.DEPTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.BREADTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.BREADTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); aggregationBuilder.order(BucketOrder.aggregation("card", true)); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); @@ -257,51 
+278,145 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { directory.close(); } - public void testSimple() throws Exception { + /** + * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is false + */ + public void testSimpleAggregation() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + } + + /** + * This test case utilizes the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is true + */ + public void testSimpleAggregationLowCardinality() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + } + + /** + * This test case utilizes the MapStringTermsAggregator. 
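+     * The MAP hint forces MapStringTermsAggregator, which buckets by hashing each document's
+     * terms; the termDocFreqCollector shortcut belongs to the global-ordinals aggregators, so all
+     * four live documents are expected to be visited no matter how the other flags are randomized.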
+ */ + public void testSimpleMapStringAggregation() throws Exception { + testSimple( + ADD_SORTED_SET_FIELD_INDEXED, + randomBoolean(), + randomBoolean(), + randomBoolean(), + TermsAggregatorFactory.ExecutionMode.MAP, + 4 + ); + } + + /** + * This is a utility method to test out string terms aggregation + * @param addFieldConsumer a function that determines how a field is added to the document + * @param includeDeletedDocumentsInSegment to include deleted documents in the segment or not + * @param collectSegmentOrds collect segment ords or not - set true to utilize LowCardinality implementation for GlobalOrdinalsStringTermsAggregator + * @param executionMode execution mode MAP or GLOBAL_ORDINALS + * @param expectedCollectCount expected number of documents visited as part of collect() invocation + */ + private void testSimple( + TriConsumer<Document, String, String> addFieldConsumer, + final boolean includeDeletedDocumentsInSegment, + final boolean includeDocCountField, + boolean collectSegmentOrds, + TermsAggregatorFactory.ExecutionMode executionMode, + final int expectedCollectCount + ) throws Exception { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try ( + RandomIndexWriter indexWriter = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { Document document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); + addFieldConsumer.apply(document, "string", "a"); + addFieldConsumer.apply(document, "string", "b"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); - document.add(new SortedSetDocValuesField("string", new BytesRef("c"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); + addFieldConsumer.apply(document, "string", ""); + addFieldConsumer.apply(document, "string", "c"); + addFieldConsumer.apply(document, "string", "a"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("d"))); + addFieldConsumer.apply(document, "string", "b"); + addFieldConsumer.apply(document, "string", "d"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); + addFieldConsumer.apply(document, "string", ""); + if (includeDocCountField) { + // Adding _doc_count to one document + document.add(new NumericDocValuesField("_doc_count", 10)); + } indexWriter.addDocument(document); + + if (includeDeletedDocumentsInSegment) { + document = new Document(); + ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); + indexWriter.addDocument(document); + indexWriter.deleteDocuments(new Term("string", "e")); + assertEquals(5, indexWriter.getDocStats().maxDoc); // deleted document still in segment + } + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { IndexSearcher indexSearcher = newIndexSearcher(indexReader); - for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { - TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( - ValueType.STRING - 
).executionHint(executionMode.toString()).field("string").order(BucketOrder.key(true)); - MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); - aggregator.preCollection(); - indexSearcher.search(new MatchAllDocsQuery(), aggregator); - aggregator.postCollection(); - Terms result = reduce(aggregator); - assertEquals(5, result.getBuckets().size()); - assertEquals("", result.getBuckets().get(0).getKeyAsString()); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.STRING) + .executionHint(executionMode.toString()) + .field("string") + .order(BucketOrder.key(true)); + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); + + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = collectSegmentOrds; + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = false; + CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(5, result.getBuckets().size()); + assertEquals("", result.getBuckets().get(0).getKeyAsString()); + if (includeDocCountField) { + assertEquals(11L, result.getBuckets().get(0).getDocCount()); + } else { assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("a", result.getBuckets().get(1).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(1).getDocCount()); - assertEquals("b", result.getBuckets().get(2).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(2).getDocCount()); - assertEquals("c", result.getBuckets().get(3).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(3).getDocCount()); - assertEquals("d", result.getBuckets().get(4).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(4).getDocCount()); - assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); } + assertEquals("a", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("b", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("c", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertEquals("d", result.getBuckets().get(4).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); + + assertEquals(expectedCollectCount, aggregator.getCollectCount().get()); } } } @@ -1543,5 +1658,4 @@ private <T extends InternalAggregation> T reduce(Aggregator agg) throws IOExcept doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); return result; } - } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java index e02c00005df9b..94cb4c7955a21 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -39,7 +39,6 @@ import org.opensearch.common.document.DocumentField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; -import 
org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,7 +47,7 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.geo.RandomGeoGenerator; import java.util.ArrayList; @@ -65,7 +64,7 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractGeoTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractGeoTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; @@ -97,11 +96,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java index 9eb90f2358f98..98dde2c7a31b3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java @@ -60,7 +60,7 @@ private static class FakeAggregationScript extends AggregationScript { int index; FakeAggregationScript(Object[][] values) { - super(Collections.emptyMap(), new SearchLookup(null, null) { + super(Collections.emptyMap(), new SearchLookup(null, null, SearchLookup.UNKNOWN_SHARD_ID) { @Override public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java new file mode 100644 index 0000000000000..7ca5977a1c276 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.fetch.subphase; + +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.fetch.FetchContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class InnerHitsPhaseTests extends OpenSearchTestCase { + + /* + Returns mock search context reused across test methods + */ + private SearchContext getMockSearchContext(final boolean hasInnerHits) { + final QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.newFetchLookup()).thenReturn(mock(SearchLookup.class)); + + final SearchContext searchContext = mock(SearchContext.class); + when(searchContext.hasInnerHits()).thenReturn(hasInnerHits); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + + return searchContext; + } + + /* + Validates that InnerHitsPhase processor is not initialized when no inner hits + */ + public void testInnerHitsNull() { + assertNull(new InnerHitsPhase(null).getProcessor(new FetchContext(getMockSearchContext(false)))); + } + + /* + Validates that InnerHitsPhase processor is initialized when inner hits are present + */ + public void testInnerHitsNonNull() { + final SearchContext searchContext = getMockSearchContext(true); + when(searchContext.innerHits()).thenReturn(new InnerHitsContext()); + + assertNotNull(new InnerHitsPhase(null).getProcessor(new FetchContext(searchContext))); + } + +} diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java new file mode 100644 index 0000000000000..eb6338997ab9f --- /dev/null +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.fetch.subphase; + +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.fetch.FetchContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ScriptFieldsPhaseTests extends OpenSearchTestCase { + + /* + Returns mock search context reused across test methods + */ + private SearchContext getMockSearchContext(final boolean hasScriptFields) { + final QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.newFetchLookup()).thenReturn(mock(SearchLookup.class)); + + final SearchContext searchContext = mock(SearchContext.class); + when(searchContext.hasScriptFields()).thenReturn(hasScriptFields); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + + return searchContext; + } + + /* + Validates that ScriptFieldsPhase processor is not initialized when no script fields + */ + public void testScriptFieldsNull() { + assertNull(new ScriptFieldsPhase().getProcessor(new FetchContext(getMockSearchContext(false)))); + } + + /* + Validates that ScriptFieldsPhase processor is initialized when script fields are present + */ + public void testScriptFieldsNonNull() { + final SearchContext searchContext = getMockSearchContext(true); + when(searchContext.scriptFields()).thenReturn(new ScriptFieldsContext()); + + assertNotNull(new ScriptFieldsPhase().getProcessor(new FetchContext(searchContext))); + } + +} diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java index 85aacfbd63ee2..8c4b8ad6d1776 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java @@ -85,6 +85,7 @@ public void setUp() throws Exception { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); diff --git a/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java new file mode 100644 index 0000000000000..e942c3ab17420 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
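InnerHitsPhaseTests and ScriptFieldsPhaseTests above pin down the same contract from two angles: a fetch sub-phase opts out of a request by returning null from getProcessor. A minimal sketch of that pattern, assuming the FetchSubPhase and FetchSubPhaseProcessor interfaces behave exactly as these tests exercise them; the phase itself and the comments inside it are illustrative, not part of this change:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.opensearch.search.fetch.FetchContext;
import org.opensearch.search.fetch.FetchSubPhase;
import org.opensearch.search.fetch.FetchSubPhaseProcessor;

public final class ExampleFetchSubPhase implements FetchSubPhase {
    @Override
    public FetchSubPhaseProcessor getProcessor(FetchContext context) {
        if (context.scriptFields() == null) {
            return null; // nothing requested: the phase is skipped, as testScriptFieldsNull asserts
        }
        return new FetchSubPhaseProcessor() {
            @Override
            public void setNextReader(LeafReaderContext readerContext) throws IOException {
                // per-segment setup (e.g. binding doc values) would go here
            }

            @Override
            public void process(HitContext hitContext) throws IOException {
                // per-hit work (e.g. running the script and attaching fields) would go here
            }
        };
    }
}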
+ */ + +package org.opensearch.search.lookup; + +import org.opensearch.index.mapper.MapperService; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; + +public class SearchLookupTests extends OpenSearchTestCase { + public void testDeprecatedConstructorShardId() { + final SearchLookup searchLookup = new SearchLookup(mock(MapperService.class), (a, b) -> null); + assertThrows(IllegalStateException.class, searchLookup::shardId); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index d0e01c5461c79..4bd4d406e4391 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -122,6 +122,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -437,10 +438,16 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + // Do not expect an exact match when terminate_after is used in conjunction to size = 0 as an optimization introduced by + // https://issues.apache.org/jira/browse/LUCENE-10620 can produce a total hit count >= terminated_after, because + // TotalHitCountCollector is used in this case as part of Weight#count() optimization context.setSize(0); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } @@ -466,7 +473,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.parsedQuery(new ParsedQuery(bq)); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { @@ -486,9 +496,12 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(manager.getTotalHits(), equalTo(1)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numDocs))); } // tests with 
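The SearchLookup changes threaded through this patch (UNKNOWN_SHARD_ID in the test utilities, plus the new SearchLookupTests above) reduce to one observable rule, sketched below with the same Mockito mock the test uses. The two-argument lambda stands in for the field-data lookup and just returns null; passing 0 assumes any non-negative shard id is accepted:

import org.opensearch.index.mapper.MapperService;
import org.opensearch.search.lookup.SearchLookup;

import static org.mockito.Mockito.mock;

public class SearchLookupShardIdSketch {
    public static void main(String[] args) {
        // Three-argument constructor: the shard id is recorded and can be read back by scripts.
        SearchLookup withShard = new SearchLookup(mock(MapperService.class), (fieldType, lookup) -> null, 0);
        System.out.println(withShard.shardId()); // prints 0

        // Deprecated two-argument constructor: still compiles, but shardId() fails fast
        // with IllegalStateException, exactly what testDeprecatedConstructorShardId asserts.
        SearchLookup legacy = new SearchLookup(mock(MapperService.class), (fieldType, lookup) -> null);
        legacy.shardId(); // throws
    }
}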
trackTotalHits and terminateAfter @@ -503,7 +516,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); } else { - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10))); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10L)), lessThanOrEqualTo((long) numDocs)) + ); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); // The concurrent search terminates the collection when the number of hits is reached by each @@ -511,7 +527,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { // slices (as the unit of concurrency). To address that, we have to use the shared global state, // much as HitsThresholdChecker does. if (executor == null) { - assertThat(manager.getTotalHits(), equalTo(10)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10)), lessThanOrEqualTo(numDocs))); } } diff --git a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java index ca4b7dc49f6f0..55c50b8cf854d 100644 --- a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java +++ b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java @@ -39,9 +39,14 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.opensearch.Version; @@ -49,6 +54,7 @@ import org.opensearch.common.lucene.search.function.ScriptScoreQuery; import org.opensearch.script.ScoreScript; import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; @@ -56,6 +62,8 @@ import org.junit.Before; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; @@ -177,6 +185,37 @@ public void testScriptScoreErrorOnNegativeScore() { assertTrue(e.getMessage().contains("Must be a non-negative score!")); } + public void testTwoPhaseIteratorDelegation() throws IOException { + Map<String, Object> params = new HashMap<>(); + String scriptSource = "doc['field'].value != null ? 
2.0 : 0.0"; // Adjust based on actual field and logic + Script script = new Script(ScriptType.INLINE, "painless", scriptSource, params); + float minScore = 1.0f; // This should be below the score produced by the script for all docs + ScoreScript.LeafFactory factory = newFactory(script, false, explanation -> 2.0); + + Query subQuery = new MatchAllDocsQuery(); + ScriptScoreQuery scriptScoreQuery = new ScriptScoreQuery(subQuery, script, factory, minScore, "index", 0, Version.CURRENT); + + Weight weight = searcher.createWeight(searcher.rewrite(scriptScoreQuery), ScoreMode.COMPLETE, 1f); + + boolean foundMatchingDoc = false; + for (LeafReaderContext leafContext : searcher.getIndexReader().leaves()) { + Scorer scorer = weight.scorer(leafContext); + if (scorer != null) { + TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator(); + assertNotNull("TwoPhaseIterator should not be null", twoPhaseIterator); + DocIdSetIterator docIdSetIterator = twoPhaseIterator.approximation(); + int docId; + while ((docId = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (twoPhaseIterator.matches()) { + foundMatchingDoc = true; + break; + } + } + } + } + assertTrue("Expected to find at least one matching document", foundMatchingDoc); + } + private ScoreScript.LeafFactory newFactory( Script script, boolean needsScore, @@ -203,5 +242,4 @@ public double execute(ExplanationHolder explanation) { } }; } - } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9bb1f51c51cf6..635939e68de71 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -157,6 +157,7 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.cache.module.CacheModule; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -188,7 +189,6 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; @@ -240,6 +240,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -2037,7 +2038,6 @@ public void onFailure(final Exception e) { final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); final SetOnce<RepositoriesService> repositoriesServiceReference = new SetOnce<>(); repositoriesServiceReference.set(repositoriesService); - FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnv, null); indicesService = new IndicesService( settings, mock(PluginsService.class), @@ -2072,10 +2072,10 @@ public void onFailure(final Exception e) { emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, - fileCacheCleaner, null, new RemoteStoreStatsTrackerFactory(clusterService, settings), - DefaultRecoverySettings.INSTANCE + 
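The new ScriptScoreQuery test above doubles as a compact reference for the two-phase protocol itself: consumers must drive the cheap approximation and confirm every candidate with matches(). A standalone sketch in plain Lucene terms, independent of any OpenSearch types:

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;

public final class TwoPhaseCounting {
    // Counts matching docs in one segment, honoring the approximation/matches() contract.
    static int countMatches(Scorer scorer) throws IOException {
        TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            // No two-phase support: the scorer's iterator is already exact.
            DocIdSetIterator exact = scorer.iterator();
            int count = 0;
            while (exact.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                count++;
            }
            return count;
        }
        DocIdSetIterator approximation = twoPhase.approximation();
        int count = 0;
        while (approximation.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            if (twoPhase.matches()) { // potentially expensive check, e.g. the min_score script
                count++;
            }
        }
        return count;
    }
}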
DefaultRecoverySettings.INSTANCE, + new CacheModule(new ArrayList<>(), settings).getCacheService() ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -2313,7 +2313,8 @@ public void onFailure(final Exception e) { client ), NoopMetricsRegistry.INSTANCE, - searchRequestOperationsCompositeListenerFactory + searchRequestOperationsCompositeListenerFactory, + NoopTracer.INSTANCE ) ); actions.put( diff --git a/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java new file mode 100644 index 0000000000000..4c96f79b30d55 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class TelemetrySettingsTests extends OpenSearchTestCase { + + public void testSetTracingEnabledOrDisabled() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validation for tracingEnabled as true + telemetrySettings.setTracingEnabled(true); + assertTrue(telemetrySettings.isTracingEnabled()); + + // Validation for tracingEnabled as false + telemetrySettings.setTracingEnabled(false); + assertFalse(telemetrySettings.isTracingEnabled()); + } + + public void testSetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating default sample rate i.e 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating override for sampling for 100% request + telemetrySettings.setSamplingProbability(1.00); + assertEquals(1.00, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating override for sampling for 50% request + telemetrySettings.setSamplingProbability(0.50); + assertEquals(0.50, telemetrySettings.getSamplingProbability(), 0.00d); + } + + public void testGetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating default value of Sampling is 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.02").build()); + + // Validating if default sampling is updated to 2% + assertEquals(0.02, telemetrySettings.getSamplingProbability(), 0.00d); + } + +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java 
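For reference, the dynamic-update path TelemetrySettingsTests above exercises, condensed into one flow; everything here mirrors the test verbatim except the 0.5 value:

import java.util.Set;

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.TelemetrySettings;

public class SamplingUpdateSketch {
    public static void main(String[] args) {
        ClusterSettings clusterSettings = new ClusterSettings(
            Settings.EMPTY,
            Set.of(TelemetrySettings.TRACER_SAMPLER_PROBABILITY, TelemetrySettings.TRACER_ENABLED_SETTING)
        );
        TelemetrySettings telemetry = new TelemetrySettings(Settings.EMPTY, clusterSettings);
        // Dynamic update: the consumer TelemetrySettings registered on construction picks this up.
        clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.5").build());
        System.out.println(telemetry.getSamplingProbability()); // 0.5
    }
}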
b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java index b4183412cdf02..4b763e4bd4454 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.network.NetworkAddress; @@ -19,6 +21,7 @@ import org.opensearch.http.HttpResponse; import org.opensearch.rest.RestRequest; import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; @@ -27,29 +30,64 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; public class SpanBuilderTests extends OpenSearchTestCase { + public String uri; + + public String expectedSpanName; + + public String expectedQueryParams; + + public String expectedReqRawPath; + + @ParametersFactory + public static Collection<Object[]> data() { + return Arrays.asList( + new Object[][] { + { "/_test/resource?name=John&age=25", "GET /_test/resource", "name=John&age=25", "/_test/resource" }, + { "/_test/", "GET /_test/", "", "/_test/" }, } + ); + } + + public SpanBuilderTests(String uri, String expectedSpanName, String expectedQueryParams, String expectedReqRawPath) { + this.uri = uri; + this.expectedSpanName = expectedSpanName; + this.expectedQueryParams = expectedQueryParams; + this.expectedReqRawPath = expectedReqRawPath; + } + public void testHttpRequestContext() { - HttpRequest httpRequest = createHttpRequest(); + HttpRequest httpRequest = createHttpRequest(uri); SpanCreationContext context = SpanBuilder.from(httpRequest); Attributes attributes = context.getAttributes(); - assertEquals("GET /_test", context.getSpanName()); + assertEquals(expectedSpanName, context.getSpanName()); assertEquals("true", attributes.getAttributesMap().get(AttributeNames.TRACE)); assertEquals("GET", attributes.getAttributesMap().get(AttributeNames.HTTP_METHOD)); assertEquals("HTTP_1_0", attributes.getAttributesMap().get(AttributeNames.HTTP_PROTOCOL_VERSION)); - assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + assertEquals(uri, attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } } public void testRestRequestContext() { - RestRequest restRequest = RestRequest.request(null, createHttpRequest(), null); + RestRequest restRequest = RestRequest.request(null, createHttpRequest(uri), null); SpanCreationContext context = SpanBuilder.from(restRequest); Attributes attributes = context.getAttributes(); - assertEquals("GET /_test", context.getSpanName()); - assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); + assertEquals(expectedSpanName, context.getSpanName()); + assertEquals(expectedReqRawPath, attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); 
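+         // Span name is "<method> <raw path>"; the raw path never carries the query string,
+         // and HTTP_REQ_QUERY_PARAMS is only attached when the URI actually has parameters.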
assertNotNull(attributes.getAttributesMap().get(AttributeNames.REST_REQ_ID)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } } public void testRestRequestContextForNull() { @@ -67,6 +105,16 @@ public void testTransportContext() { assertEquals(connection.getNode().getHostAddress(), attributes.getAttributesMap().get(AttributeNames.TRANSPORT_TARGET_HOST)); } + public void testParentSpan() { + String spanName = "test-name"; + SpanContext parentSpanContext = new SpanContext(NoopSpan.INSTANCE); + SpanCreationContext context = SpanBuilder.from(spanName, parentSpanContext); + Attributes attributes = context.getAttributes(); + assertNull(attributes); + assertEquals(spanName, context.getSpanName()); + assertEquals(parentSpanContext, context.getParent()); + } + private static Transport.Connection createTransportConnection() { return new Transport.Connection() { @Override @@ -97,7 +145,7 @@ public void close() { }; } - private static HttpRequest createHttpRequest() { + private static HttpRequest createHttpRequest(String uri) { return new HttpRequest() { @Override public RestRequest.Method method() { @@ -106,7 +154,7 @@ public RestRequest.Method method() { @Override public String uri() { - return "/_test"; + return uri; } @Override diff --git a/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy new file mode 100644 index 0000000000000..0dc754ccb0c57 --- /dev/null +++ b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +//// additional test framework permissions. +//// These are mock objects and test management that we allow test framework libs +//// to provide on our behalf. But tests themselves cannot do this stuff! + +grant codeBase "${codebase.zstd-jni}" { +}; + +grant codeBase "${codebase.kafka-server-common}" { +}; + +grant codeBase "${codebase.kafka-server-common@test}" { +}; diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..7b0a9b3d5d709 --- /dev/null +++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
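SpanBuilderTests above now runs each case once per URI through randomizedtesting's parameter injection. The skeleton of that pattern, stripped of the tracing types; the class, its field names, and the test body are illustrative only:

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.test.OpenSearchTestCase;

import java.util.Arrays;
import java.util.Collection;

public class UriParameterizedTests extends OpenSearchTestCase {
    private final String uri;
    private final String expectedSpanName;

    public UriParameterizedTests(String uri, String expectedSpanName) {
        this.uri = uri;
        this.expectedSpanName = expectedSpanName;
    }

    // One Object[] per run; the runner matches each row to the constructor arguments.
    @ParametersFactory
    public static Collection<Object[]> data() {
        return Arrays.asList(
            new Object[][] {
                { "/_test/resource?name=John&age=25", "GET /_test/resource" },
                { "/_test/", "GET /_test/" } }
        );
    }

    public void testSpanNameDropsQueryString() {
        assertEquals(expectedSpanName, "GET " + uri.split("\\?", 2)[0]);
    }
}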
+ */ + +grant { + // allow to test Security policy and codebases + permission java.util.PropertyPermission "*", "read,write"; + permission java.security.SecurityPermission "createPolicy.JavaPolicy"; +}; diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json index 4a4fc7d2c81b1..1ed56fa6dab4d 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json @@ -20,4 +20,4 @@ } } } -} \ No newline at end of file +} diff --git a/settings.gradle b/settings.gradle index 24ab4a7a22237..8fbf32504215b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.16.1" + id "com.gradle.enterprise" version "3.16.2" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java index 90f4f1ba2ceb2..5cb5cdd18f6cc 100644 --- a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java +++ b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.metrics.InternalMax; @@ -64,11 +63,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(DelayedShardAggregationPlugin.class); diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 7adf29792f27d..0ca4797bfeff1 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -51,6 +51,7 @@ dependencies { exclude module: "logback-core" exclude module: "logback-classic" exclude module: "avro" + exclude group: 'org.apache.kerby' } api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:${versions.commonscompress}" @@ -67,15 +68,16 @@ dependencies { api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" - api 'org.apache.zookeeper:zookeeper:3.9.1' + api 'org.apache.zookeeper:zookeeper:3.9.2' api "org.apache.commons:commons-text:1.11.0" api "commons-net:commons-net:3.10.0" - api "ch.qos.logback:logback-core:1.2.13" + api "ch.qos.logback:logback-core:1.5.3" api "ch.qos.logback:logback-classic:1.2.13" + api 'org.apache.kerby:kerb-admin:2.0.3' runtimeOnly "com.google.guava:guava:${versions.guava}" runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.7.0" + runtimeOnly "com.squareup.okio:okio:3.8.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml index e4d2faab9a657..539ca9471fa04 100644 --- a/test/fixtures/minio-fixture/docker-compose.yml +++ b/test/fixtures/minio-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3' +version: '3.2' services: minio-fixture: build: diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index 43881d0660e04..933385dedcf49 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -84,6 +84,7 @@ * The idea is to mimic as much as possible what happens with ES in production * mode (e.g. 
assign permissions and install security manager the same way) */ +@SuppressWarnings("removal") public class BootstrapForTesting { // TODO: can we share more code with the non-test side here diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index ac78a0d1936ea..a65ce3cbdd380 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -253,7 +253,7 @@ protected QueryShardContext createQueryShardContext(MapperService mapperService) when(queryShardContext.allowExpensiveQueries()).thenReturn(true); when(queryShardContext.lookup()).thenReturn(new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); - })); + }, SearchLookup.UNKNOWN_SHARD_ID)); when(queryShardContext.getFieldType(any())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); when(queryShardContext.documentMapper(anyString())).thenReturn(mapperService.documentMapper()); return queryShardContext; diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java index da043229c642d..dc5954907a4fa 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java @@ -293,7 +293,7 @@ protected final List<?> fetchFromDocValues(MapperService mapperService, MappedFi withLuceneIndex(mapperService, iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); }, iw -> { - SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); + SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, SearchLookup.UNKNOWN_SHARD_ID); ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); IndexSearcher searcher = newSearcher(iw); LeafReaderContext context = searcher.getIndexReader().leaves().get(0); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 412d5235fe462..bf1c4d4c94e04 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -785,10 +785,10 @@ protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMet protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(path); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); - RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("data")); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("metadata")); RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( - new 
RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) + new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex().resolve("lock_files"))) ); return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index 0f5e043ee1135..cc2d26a598757 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -31,6 +31,7 @@ * Mockito plugin which wraps the Mockito calls into priviledged execution blocks and respects * SecurityManager presence. */ +@SuppressWarnings("removal") @SuppressForbidden(reason = "allow URL#getFile() to be used in tests") public class PriviledgedMockMaker implements MockMaker { private static AccessControlContext context; diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index ac0447dbebf7e..4eb49ebb42241 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -34,11 +34,13 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.CompositeReaderContext; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -62,6 +64,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.TriConsumer; import org.opensearch.common.TriFunction; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -121,6 +124,7 @@ import org.opensearch.search.aggregations.AggregatorFactories.Builder; import org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer; import org.opensearch.search.aggregations.bucket.nested.NestedAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.metrics.MetricsAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; @@ -147,6 +151,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -178,6 +183,14 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase { // A list of field types that should not be tested, or are not currently supported private static List<String> TYPE_TEST_DENYLIST; + 
protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_NOT_INDEXED = (document, field, value) -> document + .add(new SortedSetDocValuesField(field, new BytesRef(value))); + + protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_INDEXED = (document, field, value) -> { + document.add(new SortedSetDocValuesField(field, new BytesRef(value))); + document.add(new StringField(field, value, Field.Store.NO)); + }; + static { List<String> denylist = new ArrayList<>(); denylist.add(ObjectMapper.CONTENT_TYPE); // Cannot aggregate objects @@ -433,7 +446,6 @@ protected QueryShardContext queryShardContextMock( CircuitBreakerService circuitBreakerService, BigArrays bigArrays ) { - return new QueryShardContext( 0, indexSettings, @@ -1096,6 +1108,89 @@ protected void doWriteTo(StreamOutput out) throws IOException { } } + /** + * Wrapper around Aggregator class + * Maintains a count for times collect() is invoked - number of documents visited + */ + protected static class CountingAggregator extends Aggregator { + private final AtomicInteger collectCounter; + public final Aggregator delegate; + + public CountingAggregator(AtomicInteger collectCounter, TermsAggregator delegate) { + this.collectCounter = collectCounter; + this.delegate = delegate; + } + + public AtomicInteger getCollectCount() { + return collectCounter; + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public SearchContext context() { + return delegate.context(); + } + + @Override + public Aggregator parent() { + return delegate.parent(); + } + + @Override + public Aggregator subAggregator(String name) { + return delegate.subAggregator(name); + } + + @Override + public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + return delegate.buildAggregations(owningBucketOrds); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return delegate.buildEmptyAggregation(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + delegate.getLeafCollector(ctx).collect(doc, bucket); + collectCounter.incrementAndGet(); + } + }; + } + + @Override + public ScoreMode scoreMode() { + return delegate.scoreMode(); + } + + @Override + public void preCollection() throws IOException { + delegate.preCollection(); + } + + @Override + public void postCollection() throws IOException { + delegate.postCollection(); + } + + public void setWeight(Weight weight) { + this.delegate.setWeight(weight); + } + } + public static class InternalAggCardinality extends InternalAggregation { private final CardinalityUpperBound cardinality; diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java index 8e94f2cacf070..0b44fe447d6f8 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java @@ -36,11 +36,10 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import 
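A short usage sketch for the CountingAggregator wrapper defined above, following the wiring TermsAggregatorTests uses (createCountingAggregator lives in that test class, not here; the builder and field names are illustrative):

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.opensearch.index.mapper.KeywordFieldMapper;
import org.opensearch.index.mapper.MappedFieldType;
import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

// Inside a test: wrap the real aggregator, run the search, then assert how many
// documents collect() actually visited (0 when the term-frequency shortcut kicks in).
void assertCollectCount(IndexSearcher searcher, int expected) throws IOException {
    TermsAggregationBuilder builder = new TermsAggregationBuilder("_name").field("string");
    MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string");
    CountingAggregator aggregator = createCountingAggregator(builder, searcher, false, fieldType);
    aggregator.preCollection();
    searcher.search(new MatchAllDocsQuery(), aggregator);
    aggregator.postCollection();
    assertEquals(expected, aggregator.getCollectCount().get());
}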
org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -49,7 +48,7 @@ import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public abstract class AbstractTermsTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractTermsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public AbstractTermsTestCase(Settings dynamicSettings) { super(dynamicSettings); @@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString(); } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java index 6b5ec838f401d..466e4d1bf1742 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -14,6 +14,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; @@ -40,13 +41,16 @@ import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.composite.InternalComposite; @@ -139,12 +143,16 @@ protected void executeTestCase( boolean useIndexSort, Query query, List<Map<String, List<Object>>> dataset, - Supplier<CompositeAggregationBuilder> create, + Supplier<? 
extends AggregationBuilder> create, Consumer<InternalComposite> verify ) throws IOException { Map<String, MappedFieldType> types = FIELD_TYPES.stream().collect(Collectors.toMap(MappedFieldType::name, Function.identity())); - CompositeAggregationBuilder aggregationBuilder = create.get(); - Sort indexSort = useIndexSort ? buildIndexSort(aggregationBuilder.sources(), types) : null; + AggregationBuilder aggregationBuilder = create.get(); + Sort indexSort = null; + if (aggregationBuilder instanceof CompositeAggregationBuilder && useIndexSort) { + CompositeAggregationBuilder cab = (CompositeAggregationBuilder) aggregationBuilder; + indexSort = buildIndexSort(cab.sources(), types); + } IndexSettings indexSettings = createIndexSettings(indexSort); try (Directory directory = newDirectory()) { IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); @@ -180,14 +188,16 @@ protected void executeTestCase( } try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = new IndexSearcher(indexReader); - InternalComposite composite = searchAndReduce( + InternalAggregation aggregation = searchAndReduce( indexSettings, indexSearcher, query, aggregationBuilder, FIELD_TYPES.toArray(new MappedFieldType[0]) ); - verify.accept(composite); + if (aggregation instanceof InternalComposite) { + verify.accept((InternalComposite) aggregation); + } } } } @@ -196,6 +206,12 @@ protected void addToDocument(int id, Document doc, Map<String, List<Object>> key doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); for (Map.Entry<String, List<Object>> entry : keys.entrySet()) { final String name = entry.getKey(); + if (name.equals(DocCountFieldMapper.NAME)) { + doc.add(new IntPoint(name, (int) entry.getValue().get(0))); + // doc count field should be DocValuesType.NUMERIC + doc.add(new NumericDocValuesField(name, (int) entry.getValue().get(0))); + continue; + } for (Object value : entry.getValue()) { if (value instanceof Integer) { doc.add(new SortedNumericDocValuesField(name, (int) value)); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java index 103b67e2782de..8c2cefa89c860 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -35,9 +35,8 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -48,11 +47,11 @@ import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractNumericTestCase extends ParameterizedOpenSearchIntegTestCase { +public abstract class AbstractNumericTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static long minValue, maxValue, minValues, maxValues; - public AbstractNumericTestCase(Settings dynamicSettings) { - super(dynamicSettings); + public AbstractNumericTestCase(Settings staticSettings) { + 
super(staticSettings); } @ParametersFactory @@ -63,11 +62,6 @@ public static Collection<Object[]> parameters() { ); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java index eddcf9c738bb3..f698cd03c464f 100644 --- a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java +++ b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java @@ -46,6 +46,7 @@ public static synchronized void clear() { private static final Logger LOGGER = LogManager.getLogger(FeatureFlagSetter.class); private final Set<String> flags = ConcurrentCollections.newConcurrentSet(); + @SuppressWarnings("removal") @SuppressForbidden(reason = "Enables setting of feature flags") private void setFlag(String flag) { flags.add(flag); @@ -53,6 +54,7 @@ private void setFlag(String flag) { LOGGER.info("set feature_flag={}", flag); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "Clears the set feature flags") private void clearAll() { for (String flag : flags) { diff --git a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java index 328aaf8a65b1f..6d6199833b25b 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java @@ -35,18 +35,23 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Property; import org.apache.logging.log4j.core.filter.RegexFilter; import org.opensearch.common.logging.Loggers; import org.opensearch.common.regex.Regex; +import org.opensearch.test.junit.annotations.TestLogging; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; /** * Test appender that can be used to verify that certain events were logged correctly @@ -68,11 +73,19 @@ public class MockLogAppender extends AbstractAppender implements AutoCloseable { * write to a closed MockLogAppender instance. */ public static MockLogAppender createForLoggers(Logger... loggers) throws IllegalAccessException { - return createForLoggers(".*(\n.*)*", loggers); + final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName(); + return createForLoggersInternal(callingClass, ".*(\n.*)*", loggers); } public static MockLogAppender createForLoggers(String filter, Logger... loggers) throws IllegalAccessException { + final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName(); + return createForLoggersInternal(callingClass, filter, loggers); + } + + private static MockLogAppender createForLoggersInternal(String callingClass, String filter, Logger... 
loggers) + throws IllegalAccessException { final MockLogAppender appender = new MockLogAppender( + callingClass + "-mock-log-appender", RegexFilter.createFilter(filter, new String[0], false, null, null), Collections.unmodifiableList(Arrays.asList(loggers)) ); @@ -83,8 +96,8 @@ public static MockLogAppender createForLoggers(String filter, Logger... loggers) return appender; } - private MockLogAppender(RegexFilter filter, List<Logger> loggers) { - super("mock", filter, null); + private MockLogAppender(String name, RegexFilter filter, List<Logger> loggers) { + super(name, filter, null, true, Property.EMPTY_ARRAY); /* * We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs, * we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a @@ -116,7 +129,14 @@ public void close() { for (Logger logger : loggers) { Loggers.removeAppender(logger, this); } - this.stop(); + super.stop(); + } + + @Override + public void stop() { + // MockLogAppender should be used with try-with-resources to ensure + // proper clean up ordering and should never be stopped directly. + throw new UnsupportedOperationException("Use close() to ensure proper clean up ordering"); } public interface LoggingExpectation { @@ -243,6 +263,59 @@ public void assertMatched() { } + /** + * Used for cases when the logger is dynamically named such as to include an index name or shard id + * + * Best used in conjunction with the root logger: + * {@code @TestLogging(value = "_root:debug", reason = "Validate logging output");} + * @see TestLogging + * */ + public static class PatternSeenWithLoggerPrefixExpectation implements LoggingExpectation { + private final String expectationName; + private final String loggerPrefix; + private final Level level; + private final String messageMatchingRegex; + + private final List<String> loggerMatches = new ArrayList<>(); + private final AtomicBoolean eventSeen = new AtomicBoolean(false); + + public PatternSeenWithLoggerPrefixExpectation( + final String expectationName, + final String loggerPrefix, + final Level level, + final String messageMatchingRegex + ) { + this.expectationName = expectationName; + this.loggerPrefix = loggerPrefix; + this.level = level; + this.messageMatchingRegex = messageMatchingRegex; + } + + @Override + public void match(final LogEvent event) { + if (event.getLevel() == level && event.getLoggerName().startsWith(loggerPrefix)) { + final String formattedMessage = event.getMessage().getFormattedMessage(); + loggerMatches.add(formattedMessage); + if (formattedMessage.matches(messageMatchingRegex)) { + eventSeen.set(true); + } + } + } + + @Override + public void assertMatched() { + if (!eventSeen.get()) { + final StringBuilder failureMessage = new StringBuilder(); + failureMessage.append(expectationName + " was not seen, found " + loggerMatches.size() + " messages matching the logger."); + failureMessage.append("\r\nMessage matching regex: " + messageMatchingRegex); + if (!loggerMatches.isEmpty()) { + failureMessage.append("\r\nMessage details:\r\n" + String.join("\r\n", loggerMatches)); + } + fail(failureMessage.toString()); + } + } + } + private static String getLoggerName(String name) { if (name.startsWith("org.opensearch.")) { name = name.substring("org.opensearch.".length()); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java 
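Note on the PatternSeenWithLoggerPrefixExpectation added above: it matches on a logger-name prefix rather than an exact logger, which suits dynamically named loggers (for example, loggers that embed an index name or shard id). A minimal usage sketch follows; the logger name, level, and message regex are hypothetical, while the appender methods are the existing MockLogAppender API. As the javadoc suggests, it pairs best with @TestLogging(value = "_root:debug", ...).

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;

// Sketch only: logger prefix and regex are illustrative assumptions.
public void testShardLoggerEmitsMessage() throws Exception {
    try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger("org.opensearch.index.shard"))) {
        appender.addExpectation(
            new MockLogAppender.PatternSeenWithLoggerPrefixExpectation(
                "retention lease message",      // name shown in the failure message
                "org.opensearch.index.shard",   // prefix matched against event logger names
                Level.DEBUG,
                ".*retention leases.*"
            )
        );
        // ... exercise the code under test here ...
        appender.assertAllExpectationsMatched();
    }
}
```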
index 6215e84f42676..7cb1b3f4fe0d8 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -32,13 +32,12 @@ package org.opensearch.test; -import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -48,7 +47,6 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -77,7 +75,6 @@ import org.opensearch.client.ClusterAdminClient; import org.opensearch.client.Requests; import org.opensearch.client.RestClient; -import org.opensearch.client.RestClientBuilder; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.OpenSearchNodeCommand; @@ -96,7 +93,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.FeatureFlagSettings; @@ -106,7 +103,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.smile.SmileXContent; @@ -120,6 +116,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -128,8 +125,8 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; -import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; @@ -138,16 +135,19 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import 
org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.IndicesStore; import org.opensearch.monitor.os.OsInfo; import org.opensearch.node.NodeMocksPlugin; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.script.MockScriptService; import org.opensearch.search.MockSearchService; import org.opensearch.search.SearchHit; @@ -158,17 +158,15 @@ import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.telemetry.MockTelemetryPlugin; -import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; import org.hamcrest.Matchers; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Rule; import java.io.IOException; import java.lang.Runtime.Version; @@ -189,13 +187,12 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.IdentityHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Random; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -209,14 +206,21 @@ import static org.opensearch.core.common.util.CollectionUtils.eagerPartition; import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.test.XContentTestUtils.convertToMap; import static org.opensearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -334,6 +338,10 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules"; private static final boolean MOCK_MODULES_ENABLED = "true".equals(System.getProperty(TESTS_ENABLE_MOCK_MODULES, "true")); + + @Rule + public static OpenSearchTestClusterRule testClusterRule = new OpenSearchTestClusterRule(); + /** * Threshold at which indexing switches from frequently async to frequently bulk. */ @@ -369,22 +377,9 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { */ public static final String TESTS_CLUSTER_NAME = "tests.clustername"; - /** - * The current cluster depending on the configured {@link Scope}. - * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster. - */ - private static TestCluster currentCluster; - private static RestClient restClient = null; - - private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); - - private static OpenSearchIntegTestCase INSTANCE = null; // see @SuiteScope - private static Long SUITE_SEED = null; - @BeforeClass public static void beforeClass() throws Exception { - SUITE_SEED = randomLong(); - initializeSuiteScope(); + testClusterRule.beforeClass(); } @Override @@ -394,36 +389,6 @@ protected final boolean enableWarningsCheck() { return false; } - protected final void beforeInternal() throws Exception { - final Scope currentClusterScope = getCurrentClusterScope(); - Callable<Void> setup = () -> { - cluster().beforeTest(random()); - cluster().wipe(excludeTemplates()); - randomIndexTemplate(); - return null; - }; - switch (currentClusterScope) { - case SUITE: - assert SUITE_SEED != null : "Suite seed was not initialized"; - currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED); - RandomizedContext.current().runWithPrivateRandomness(SUITE_SEED, setup); - break; - case TEST: - currentCluster = buildAndPutCluster(currentClusterScope, randomLong()); - setup.call(); - break; - } - - } - - private void printTestMessage(String message) { - if (isSuiteScopedTest(getClass()) && (getTestName().equals("<unknown>"))) { - logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); - } else { - logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message); - } - } - /** * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. 
Allows to enable/disable the randomization for number of shards and replicas @@ -547,85 +512,6 @@ private static Settings.Builder setRandomIndexTranslogSettings(Random random, Se return builder; } - private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(seed, () -> buildTestCluster(scope, seed)); - } - - private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception { - final Class<?> clazz = this.getClass(); - TestCluster testCluster = clusters.remove(clazz); // remove this cluster first - clearClusters(); // all leftovers are gone by now... this is really just a double safety if we miss something somewhere - switch (currentClusterScope) { - case SUITE: - if (testCluster == null) { // only build if it's not there yet - testCluster = buildWithPrivateContext(currentClusterScope, seed); - } - break; - case TEST: - // close the previous one and create a new one - IOUtils.closeWhileHandlingException(testCluster); - testCluster = buildTestCluster(currentClusterScope, seed); - break; - } - clusters.put(clazz, testCluster); - return testCluster; - } - - private static void clearClusters() throws Exception { - if (!clusters.isEmpty()) { - IOUtils.close(clusters.values()); - clusters.clear(); - } - if (restClient != null) { - restClient.close(); - restClient = null; - } - assertBusy(() -> { - int numChannels = RestCancellableNodeClient.getNumChannels(); - assertEquals( - numChannels - + " channels still being tracked in " - + RestCancellableNodeClient.class.getSimpleName() - + " while there should be none", - 0, - numChannels - ); - }); - } - - private void afterInternal(boolean afterClass) throws Exception { - final Scope currentClusterScope = getCurrentClusterScope(); - if (isInternalCluster()) { - internalCluster().clearDisruptionScheme(); - } - try { - if (cluster() != null) { - if (currentClusterScope != Scope.TEST) { - Metadata metadata = client().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); - - final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); - assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); - - final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); - assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); - } - ensureClusterSizeConsistency(); - ensureClusterStateConsistency(); - ensureClusterStateCanBeReadByNodeTool(); - beforeIndexDeletion(); - cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete - if (afterClass || currentClusterScope == Scope.TEST) { - cluster().close(); - } - cluster().assertAfterTest(); - } - } finally { - if (currentClusterScope == Scope.TEST) { - clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST - } - } - } - /** * @return An exclude set of index templates that will not be removed in between tests. 
*/ @@ -638,18 +524,15 @@ protected void beforeIndexDeletion() throws Exception { } public static TestCluster cluster() { - return currentCluster; + return testClusterRule.cluster(); } public static boolean isInternalCluster() { - return (currentCluster instanceof InternalTestCluster); + return testClusterRule.isInternalCluster(); } public static InternalTestCluster internalCluster() { - if (!isInternalCluster()) { - throw new UnsupportedOperationException("current test cluster is immutable"); - } - return (InternalTestCluster) currentCluster; + return testClusterRule.internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable")); } public ClusterService clusterService() { @@ -661,14 +544,7 @@ public static Client client() { } public static Client client(@Nullable String node) { - if (node != null) { - return internalCluster().client(node); - } - Client client = cluster().client(); - if (frequently()) { - client = new RandomizingClient(client, random()); - } - return client; + return testClusterRule.clientForNode(node); } public static Client dataNodeClient() { @@ -771,6 +647,11 @@ public Settings indexSettings() { ); } + if (randomBoolean()) { + builder.put(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true); + builder.put(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING.getKey(), randomDoubleBetween(0.01, 0.50, true)); + } + return builder.build(); } @@ -787,6 +668,7 @@ protected Settings featureFlagSettings() { } // Enabling Telemetry setting by default featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true); + return featureSettings.build(); } @@ -1687,14 +1569,17 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma if (dummyDocuments) { indexRandomForMultipleSlices(indicesArray); } + if (forceRefresh) { + waitForReplication(); + } } /* - * This method ingests bogus documents for the given indices such that multiple slices - * are formed. This is useful for testing with the concurrent search use-case as it creates - * multiple slices based on segment count. - * @param indices the indices in which bogus documents should be ingested - * */ + * This method ingests bogus documents for the given indices such that multiple slices + * are formed. This is useful for testing with the concurrent search use-case as it creates + * multiple slices based on segment count. + * @param indices the indices in which bogus documents should be ingested + * */ protected void indexRandomForMultipleSlices(String... indices) throws InterruptedException { Set<List<String>> bogusIds = new HashSet<>(); int refreshCount = randomIntBetween(2, 3); @@ -1928,7 +1813,7 @@ public void clearScroll(String... scrollIds) { assertThat(clearResponse.isSucceeded(), equalTo(true)); } - private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { + static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { if (clazz == Object.class || clazz == OpenSearchIntegTestCase.class) { return null; } @@ -1939,16 +1824,6 @@ private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> a return getAnnotation(clazz.getSuperclass(), annotationClass); } - private Scope getCurrentClusterScope() { - return getCurrentClusterScope(this.getClass()); - } - - private static Scope getCurrentClusterScope(Class<?> clazz) { - ClusterScope annotation = getAnnotation(clazz, ClusterScope.class); - // if we are not annotated assume suite! - return annotation == null ? 
Scope.SUITE : annotation.scope(); - } - private boolean getSupportsDedicatedClusterManagers() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null ? true : annotation.supportsDedicatedMasters(); @@ -2008,6 +1883,9 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean()) .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") + // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices + // when tests are run with concurrent segment search enabled + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2) .put(featureFlagSettings()); // Enable tracer only when Telemetry Setting is enabled @@ -2015,14 +1893,25 @@ protected Settings nodeSettings(int nodeOrdinal) { builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } - if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { - // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices - // when tests are run with concurrent segment search enabled - builder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); + + // Randomly set a replication strategy for the node. Replication Strategy can still be manually overridden by subclass if needed. + if (useRandomReplicationStrategy()) { + ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT; + logger.info("Randomly using Replication Strategy as {}.", replicationType.toString()); + builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType); } return builder.build(); } + /** + * Used for selecting random replication strategy, either DOCUMENT or SEGMENT. + * This method must be overridden by subclass to use random replication strategy. + * Should be used only on test classes where replication strategy is not critical for tests. 
+ */ + protected boolean useRandomReplicationStrategy() { + return false; + } + protected Path nodeConfigPath(int nodeOrdinal) { return null; } @@ -2270,10 +2159,9 @@ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( * Returns path to a random directory that can be used to create a temporary file system repo */ public Path randomRepoPath() { - if (currentCluster instanceof InternalTestCluster) { - return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings()); - } - throw new UnsupportedOperationException("unsupported cluster type"); + return testClusterRule.internalCluster() + .map(c -> randomRepoPath(c.getDefaultSettings())) + .orElseThrow(() -> new UnsupportedOperationException("unsupported cluster type")); } /** @@ -2347,83 +2235,9 @@ private NumShards(int numPrimaries, int numReplicas) { } } - private static boolean runTestScopeLifecycle() { - return INSTANCE == null; - } - - @Before - public final void setupTestCluster() throws Exception { - if (runTestScopeLifecycle()) { - printTestMessage("setting up"); - beforeInternal(); - printTestMessage("all set up"); - } - } - - @After - public final void cleanUpCluster() throws Exception { - // Deleting indices is going to clear search contexts implicitly so we - // need to check that there are no more in-flight search contexts before - // we remove indices - if (isInternalCluster()) { - internalCluster().setBootstrapClusterManagerNodeIndex(-1); - } - super.ensureAllSearchContextsReleased(); - if (runTestScopeLifecycle()) { - printTestMessage("cleaning up after"); - afterInternal(false); - printTestMessage("cleaned up after"); - } - } - @AfterClass public static void afterClass() throws Exception { - try { - if (runTestScopeLifecycle()) { - clearClusters(); - } else { - INSTANCE.printTestMessage("cleaning up after"); - INSTANCE.afterInternal(true); - checkStaticState(true); - } - StrictCheckSpanProcessor.validateTracingStateOnShutdown(); - } finally { - SUITE_SEED = null; - currentCluster = null; - INSTANCE = null; - } - } - - private static void initializeSuiteScope() throws Exception { - Class<?> targetClass = getTestClass(); - /* - Note we create these test class instance via reflection - since JUnit creates a new instance per test and that is also - the reason why INSTANCE is static since this entire method - must be executed in a static context. - */ - assert INSTANCE == null; - if (isSuiteScopedTest(targetClass)) { - // note we need to do this way to make sure this is reproducible - if (isSuiteScopedTestParameterized(targetClass)) { - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor(Settings.class).newInstance(Settings.EMPTY); - } else { - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance(); - } - boolean success = false; - try { - INSTANCE.printTestMessage("setup"); - INSTANCE.beforeInternal(); - INSTANCE.setupSuiteScopeCluster(); - success = true; - } finally { - if (!success) { - afterClass(); - } - } - } else { - INSTANCE = null; - } + testClusterRule.afterClass(); } /** @@ -2455,41 +2269,8 @@ protected boolean forbidPrivateIndexSettings() { * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise * it cannot be reused by other tests anymore. 
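For context on the replication-strategy hook added above: useRandomReplicationStrategy() is opt-in, so a suite whose outcome does not depend on the replication mode can enable it with a one-line override. A minimal sketch, with a hypothetical suite name:

```java
// Hypothetical suite opting in to randomized replication. Returning true makes
// nodeSettings() pick DOCUMENT or SEGMENT replication at random per cluster.
public class ShardBehaviorIT extends OpenSearchIntegTestCase {
    @Override
    protected boolean useRandomReplicationStrategy() {
        return true; // only safe where test outcomes are replication-agnostic
    }
}
```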
*/ - protected static synchronized RestClient getRestClient() { - if (restClient == null) { - restClient = createRestClient(); - } - return restClient; - } - - protected static RestClient createRestClient() { - return createRestClient(null, "http"); - } - - protected static RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { - NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().get(); - assertFalse(nodesInfoResponse.hasFailures()); - return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); - } - - protected static RestClient createRestClient( - final List<NodeInfo> nodes, - RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, - String protocol - ) { - List<HttpHost> hosts = new ArrayList<>(); - for (NodeInfo node : nodes) { - if (node.getInfo(HttpInfo.class) != null) { - TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); - InetSocketAddress address = publishAddress.address(); - hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); - } - } - RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); - if (httpClientConfigCallback != null) { - builder.setHttpClientConfigCallback(httpClientConfigCallback); - } - return builder.build(); + protected static RestClient getRestClient() { + return testClusterRule.getRestClient(); } /** @@ -2500,20 +2281,6 @@ protected static RestClient createRestClient( */ protected void setupSuiteScopeCluster() throws Exception {} - private static boolean isSuiteScopedTest(Class<?> clazz) { - return clazz.getAnnotation(SuiteScopeTestCase.class) != null; - } - - /* - * For tests defined with, SuiteScopeTestCase return true if the - * class has a constructor that takes a single Settings parameter - * */ - private static boolean isSuiteScopedTestParameterized(Class<?> clazz) { - return Arrays.stream(clazz.getConstructors()) - .filter(x -> x.getParameterTypes().length == 1) - .anyMatch(x -> x.getParameterTypes()[0].equals(Settings.class)); - } - /** * If a test is annotated with {@link SuiteScopeTestCase} * the checks and modifications that are applied to the used test cluster are only done after all tests @@ -2622,4 +2389,210 @@ protected ClusterState getClusterState() { return client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); } + /** + * Refreshes the indices in the cluster and waits until active/started replica shards + * are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected RefreshResponse refreshAndWaitForReplication(String... indices) { + RefreshResponse refreshResponse = refresh(indices); + waitForReplication(); + return refreshResponse; + } + + /** + * Waits until active/started replica shards are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected void waitForReplication(String... 
indices) { + if (indices.length == 0) { + indices = getClusterState().routingTable().indicesRouting().keySet().toArray(String[]::new); + } + try { + for (String index : indices) { + if (isSegmentReplicationEnabledForIndex(index)) { + if (isInternalCluster()) { + IndexRoutingTable indexRoutingTable = getClusterState().routingTable().index(index); + if (indexRoutingTable != null) { + assertBusy(() -> { + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + final ShardRouting primaryRouting = shardRoutingTable.primaryShard(); + if (primaryRouting.state().toString().equals("STARTED")) { + if (isSegmentReplicationEnabledForIndex(index)) { + final List<ShardRouting> replicaRouting = shardRoutingTable.replicaShards(); + final IndexShard primaryShard = getIndexShard(primaryRouting, index); + for (ShardRouting replica : replicaRouting) { + if (replica.state().toString().equals("STARTED")) { + IndexShard replicaShard = getIndexShard(replica, index); + assertEquals( + "replica shards haven't caught up with primary", + getLatestSegmentInfoVersion(primaryShard), + getLatestSegmentInfoVersion(replicaShard) + ); + } + } + } + } + } + }, 30, TimeUnit.SECONDS); + } + } else { + throw new IllegalStateException( + "Segment Replication is not supported for tests using an External Test Cluster" + ); + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Checks if Segment Replication is enabled on the index. + */ + protected boolean isSegmentReplicationEnabledForIndex(String index) { + return clusterService().state().getMetadata().isSegmentReplicationEnabled(index); + } + + protected IndexShard getIndexShard(ShardRouting routing, String indexName) { + return getIndexShard(getClusterState().nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); + } + + /** + * Fetch IndexShard by shardId, multiple shards per node allowed. + */ + protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexServiceSafe(index); + final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid.equals(shardId.id())).findFirst(); + return indexService.getShard(id.get()); + } + + /** + * Fetch latest segment info snapshot version of an index. 
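A short usage sketch of the helpers above: with segment replication enabled, asserting on a search immediately after a plain refresh() can observe stale replicas, whereas refreshAndWaitForReplication() blocks until every started replica reports the primary's SegmentInfos version. The index name and document below are hypothetical; assertHitCount is the existing OpenSearchAssertions helper.

```java
// Illustrative only: index name and document are assumptions.
client().prepareIndex("test-index").setSource("field", "value").get();

// Refresh, then wait for started replicas to catch up with the primary
// (a no-op for indices using document replication).
refreshAndWaitForReplication("test-index");

// Hits are now consistent across primary and replica copies.
assertHitCount(client().prepareSearch("test-index").get(), 1L);
```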
+ */ + protected long getLatestSegmentInfoVersion(IndexShard shard) { + try (final GatedCloseable<SegmentInfos> snapshot = shard.getSegmentInfosSnapshot()) { + return snapshot.get().version; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Settings remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + segmentRepoType, + translogRepoName, + translogRepoPath, + translogRepoType, + false + ) + ); + return settingsBuilder.build(); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + return buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + ReloadableFsRepository.TYPE, + translogRepoName, + translogRepoPath, + ReloadableFsRepository.TYPE, + withRateLimiterAttributes + ); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, segmentRepoType) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, translogRepoType) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, segmentRepoType) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + return settings.build(); + } + } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index efc29d1c254e6..45ea63e862df6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -255,13 +255,11 @@ private Node newNode() { .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) - .put(nodeSettings()) // allow test cases to provide their own settings or override these - .put(featureFlagSettings); - if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices // when tests are run with concurrent segment search enabled - settingsBuilder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); - } + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2) + .put(nodeSettings()) // allow test cases to provide their own settings or override these + .put(featureFlagSettings); Collection<Class<? 
extends Plugin>> plugins = getPlugins(); if (plugins.contains(getTestTransportPlugin()) == false) { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index b5ff30deecf5c..aac3fca9e1e16 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -83,6 +83,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateUtils; import org.opensearch.common.time.FormatNames; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; @@ -144,6 +145,8 @@ import java.io.IOException; import java.io.InputStream; +import java.io.PrintWriter; +import java.io.StringWriter; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; @@ -169,6 +172,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntFunction; import java.util.function.Predicate; import java.util.function.Supplier; @@ -638,7 +642,32 @@ protected static void checkStaticState(boolean afterClass) throws Exception { try { // ensure that there are no status logger messages which would indicate a problem with our Log4j usage; we map the // StatusData instances to Strings as otherwise their toString output is useless + + final Function<StatusData, String> statusToString = (statusData) -> { + try (final StringWriter sw = new StringWriter(); final PrintWriter pw = new PrintWriter(sw)) { + + pw.print(statusData.getLevel()); + pw.print(":"); + pw.print(statusData.getMessage().getFormattedMessage()); + + if (statusData.getStackTraceElement() != null) { + final var messageSource = statusData.getStackTraceElement(); + pw.println("Source:"); + pw.println(messageSource.getFileName() + "@" + messageSource.getLineNumber()); + } + + if (statusData.getThrowable() != null) { + pw.println("Throwable:"); + statusData.getThrowable().printStackTrace(pw); + } + return sw.toString(); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + }; + assertThat( + statusData.stream().map(statusToString::apply).collect(Collectors.joining("\r\n")), statusData.stream().map(status -> status.getMessage().getFormattedMessage()).collect(Collectors.toList()), empty() ); @@ -1095,6 +1124,38 @@ public static void assertBusy(CheckedRunnable<Exception> codeBlock, long maxWait } } + /** + * Runs the code block for the provided max wait time and sleeping for fixed sleep time, waiting for no assertions to trip. 
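A usage sketch for assertBusyWithFixedSleepTime(), whose signature and retry behavior are defined just below: unlike assertBusy(), which grows its sleep between retries, this variant polls at a constant interval and attaches every intermediate AssertionError as a suppressed exception when the final attempt still fails. The health check here is an illustrative assumption.

```java
// Poll every 100 ms for up to 5 seconds; on final failure, all intermediate
// AssertionErrors are attached as suppressed exceptions for easier debugging.
public void testClusterEventuallyGreen() throws Exception {
    assertBusyWithFixedSleepTime(() -> {
        ClusterHealthResponse health = client().admin().cluster().prepareHealth().get();
        assertEquals(ClusterHealthStatus.GREEN, health.getStatus());
    }, TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100));
}
```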
+ */ + public static void assertBusyWithFixedSleepTime(CheckedRunnable<Exception> codeBlock, TimeValue maxWaitTime, TimeValue sleepTime) + throws Exception { + long maxTimeInMillis = maxWaitTime.millis(); + long sleepTimeInMillis = sleepTime.millis(); + if (sleepTimeInMillis > maxTimeInMillis) { + throw new IllegalArgumentException("sleepTime is more than the maxWaitTime"); + } + long sum = 0; + List<AssertionError> failures = new ArrayList<>(); + while (sum <= maxTimeInMillis) { + try { + codeBlock.run(); + return; + } catch (AssertionError e) { + failures.add(e); + } + sum += sleepTimeInMillis; + Thread.sleep(sleepTimeInMillis); + } + try { + codeBlock.run(); + } catch (AssertionError e) { + for (AssertionError failure : failures) { + e.addSuppressed(failure); + } + throw e; + } + } + /** * Periodically execute the supplied function until it returns true, or a timeout * is reached. This version uses a timeout of 10 seconds. If at all possible, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java new file mode 100644 index 0000000000000..57e9ccf22ab43 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.RandomizedContext; + +import org.apache.hc.core5.http.HttpHost; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.client.Client; +import org.opensearch.client.RestClient; +import org.opensearch.client.RestClientBuilder; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpInfo; +import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.OpenSearchIntegTestCase.SuiteScopeTestCase; +import org.opensearch.test.client.RandomizingClient; +import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.MultipleFailureException; +import org.junit.runners.model.Statement; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; + +import static org.hamcrest.Matchers.empty; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; + +/** + * The JUnit {@link MethodRule} that handles test method scoped and test suite scoped clusters for integration (internal cluster) tests. The rule is + injected into {@link OpenSearchIntegTestCase}, which every integration test suite should subclass. 
In case of the parameterized test suites, + * please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase} or {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}, depending + * on the way cluster settings are being managed. + */ +class OpenSearchTestClusterRule implements MethodRule { + // Maps each TestCluster instance to the exact test suite instance that triggered its creation + private final Map<TestCluster, OpenSearchIntegTestCase> suites = new IdentityHashMap<>(); + private final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); + private final Logger logger = LogManager.getLogger(getClass()); + + /** + * The current cluster depending on the configured {@link Scope}. + * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster. + */ + private TestCluster currentCluster = null; + private RestClient restClient = null; + + private OpenSearchIntegTestCase suiteInstance = null; // see @SuiteScope + private Long suiteSeed = null; + + @Override + public Statement apply(Statement base, FrameworkMethod method, Object target) { + return statement(base, method, target); + } + + void beforeClass() throws Exception { + suiteSeed = OpenSearchTestCase.randomLong(); + } + + void afterClass() throws Exception { + try { + if (runTestScopeLifecycle()) { + clearClusters(); + } else { + printTestMessage("cleaning up after"); + afterInternal(true, null); + OpenSearchTestCase.checkStaticState(true); + synchronized (clusters) { + final TestCluster cluster = clusters.remove(getTestClass()); + IOUtils.closeWhileHandlingException(cluster); + if (cluster != null) { + suites.remove(cluster); + } + } + } + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); + } finally { + suiteSeed = null; + currentCluster = null; + suiteInstance = null; + } + } + + TestCluster cluster() { + return currentCluster; + } + + boolean isInternalCluster() { + return (cluster() instanceof InternalTestCluster); + } + + Optional<InternalTestCluster> internalCluster() { + if (!isInternalCluster()) { + return Optional.empty(); + } else { + return Optional.of((InternalTestCluster) cluster()); + } + } + + Client clientForAnyNode() { + return clientForNode(null); + } + + Client clientForNode(@Nullable String node) { + if (node != null) { + return internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable")).client(node); + } + Client client = cluster().client(); + if (OpenSearchTestCase.frequently()) { + client = new RandomizingClient(client, OpenSearchTestCase.random()); + } + return client; + } + + synchronized RestClient getRestClient() { + if (restClient == null) { + restClient = createRestClient(); + } + return restClient; + } + + protected final void beforeInternal(OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(target.getClass()); + Callable<Void> setup = () -> { + currentCluster.beforeTest(OpenSearchTestCase.random()); + currentCluster.wipe(target.excludeTemplates()); + target.randomIndexTemplate(); + return null; + }; + switch (currentClusterScope) { + case SUITE: + assert suiteSeed != null : "Suite seed was not initialized"; + currentCluster = buildAndPutCluster(currentClusterScope, suiteSeed, target); + RandomizedContext.current().runWithPrivateRandomness(suiteSeed, setup); + break; + case TEST: + currentCluster = buildAndPutCluster(currentClusterScope, OpenSearchTestCase.randomLong(), target); + setup.call(); + break; + } + } + + protected void 
before(Object target, FrameworkMethod method) throws Throwable { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + initializeSuiteScope(instance, method); + + if (runTestScopeLifecycle()) { + printTestMessage("setting up", method); + beforeInternal(instance); + printTestMessage("all set up", method); + } + } + + protected void after(Object target, FrameworkMethod method) throws Exception { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + + // Deleting indices is going to clear search contexts implicitly so we + // need to check that there are no more in-flight search contexts before + // we remove indices + internalCluster().ifPresent(c -> c.setBootstrapClusterManagerNodeIndex(-1)); + + instance.ensureAllSearchContextsReleased(); + if (runTestScopeLifecycle()) { + printTestMessage("cleaning up after", method); + afterInternal(false, instance); + printTestMessage("cleaned up after", method); + } + } + + protected RestClient createRestClient() { + return createRestClient(null, "http"); + } + + protected RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { + NodesInfoResponse nodesInfoResponse = clientForAnyNode().admin().cluster().prepareNodesInfo().get(); + assertFalse(nodesInfoResponse.hasFailures()); + return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); + } + + protected RestClient createRestClient( + final List<NodeInfo> nodes, + RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, + String protocol + ) { + List<HttpHost> hosts = new ArrayList<>(); + for (NodeInfo node : nodes) { + if (node.getInfo(HttpInfo.class) != null) { + TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); + InetSocketAddress address = publishAddress.address(); + hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); + } + } + RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); + if (httpClientConfigCallback != null) { + builder.setHttpClientConfigCallback(httpClientConfigCallback); + } + return builder.build(); + } + + private Scope getClusterScope(Class<?> clazz) { + ClusterScope annotation = OpenSearchIntegTestCase.getAnnotation(clazz, ClusterScope.class); + // if we are not annotated assume suite! + return annotation == null ? Scope.SUITE : annotation.scope(); + } + + private TestCluster buildWithPrivateContext(final Scope scope, final long seed, OpenSearchIntegTestCase target) throws Exception { + return RandomizedContext.current().runWithPrivateRandomness(seed, () -> target.buildTestCluster(scope, seed)); + } + + private static boolean isSuiteScopedTest(Class<?> clazz) { + return clazz.getAnnotation(SuiteScopeTestCase.class) != null; + } + + private static boolean hasParametersChanged( + final ParameterizedOpenSearchIntegTestCase instance, + final ParameterizedOpenSearchIntegTestCase target + ) { + return !instance.hasSameParametersAs(target); + } + + private boolean runTestScopeLifecycle() { + return suiteInstance == null; + } + + private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed, OpenSearchIntegTestCase target) throws Exception { + final Class<?> clazz = target.getClass(); + + synchronized (clusters) { + TestCluster testCluster = clusters.remove(clazz); // remove this cluster first + clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere + switch (currentClusterScope) { + case SUITE: + if (testCluster != null && target instanceof ParameterizedOpenSearchIntegTestCase) { + final OpenSearchIntegTestCase instance = suites.get(testCluster); + if (instance != null) { + assert instance instanceof ParameterizedOpenSearchIntegTestCase; + if (hasParametersChanged( + (ParameterizedOpenSearchIntegTestCase) instance, + (ParameterizedOpenSearchIntegTestCase) target + )) { + IOUtils.closeWhileHandlingException(testCluster); + printTestMessage("new instance of parameterized test class, recreating test cluster for suite"); + testCluster = null; + } + } + } + + if (testCluster == null) { // only build if it's not there yet + testCluster = buildWithPrivateContext(currentClusterScope, seed, target); + suites.put(testCluster, target); + } + break; + case TEST: + // close the previous one and create a new one + IOUtils.closeWhileHandlingException(testCluster); + testCluster = target.buildTestCluster(currentClusterScope, seed); + break; + } + clusters.put(clazz, testCluster); + return testCluster; + } + } + + private void printTestMessage(String message) { + logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); + } + + private static Class<?> getTestClass() { + return OpenSearchTestCase.getTestClass(); + } + + private void printTestMessage(String message, FrameworkMethod method) { + logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), method.getName(), message); + } + + private void afterInternal(boolean afterClass, OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(getTestClass()); + internalCluster().ifPresent(InternalTestCluster::clearDisruptionScheme); + + OpenSearchIntegTestCase instance = suiteInstance; + if (instance == null) { + instance = target; + } + + try { + if (cluster() != null) { + if (currentClusterScope != Scope.TEST) { + Metadata metadata = clientForAnyNode().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); + + final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); + assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); + + final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); + assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); + } + instance.ensureClusterSizeConsistency(); + instance.ensureClusterStateConsistency(); + instance.ensureClusterStateCanBeReadByNodeTool(); + instance.beforeIndexDeletion(); + cluster().wipe(instance.excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete + if (afterClass || currentClusterScope == Scope.TEST) { + cluster().close(); + } + cluster().assertAfterTest(); + } + } finally { + if (currentClusterScope == Scope.TEST) { + clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST + } + } + } + + private void clearClusters() throws Exception { + synchronized (clusters) { + if (!clusters.isEmpty()) { + IOUtils.close(clusters.values()); + suites.clear(); + clusters.clear(); + } + } + if (restClient != null) { + restClient.close(); + restClient = null; + } + OpenSearchTestCase.assertBusy(() -> { + int numChannels = RestCancellableNodeClient.getNumChannels(); + OpenSearchTestCase.assertEquals( + numChannels + + " channels still being tracked in " + + RestCancellableNodeClient.class.getSimpleName() + + " while 
there should be none",
+                0,
+                numChannels
+            );
+        });
+    }
+
+    private Statement statement(final Statement base, FrameworkMethod method, Object target) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                before(target, method);
+
+                List<Throwable> errors = new ArrayList<Throwable>();
+                try {
+                    base.evaluate();
+                } catch (Throwable t) {
+                    errors.add(t);
+                } finally {
+                    try {
+                        after(target, method);
+                    } catch (Throwable t) {
+                        errors.add(t);
+                    }
+                }
+                MultipleFailureException.assertEmpty(errors);
+            }
+        };
+    }
+
+    private void initializeSuiteScope(OpenSearchIntegTestCase target, FrameworkMethod method) throws Exception {
+        final Class<?> targetClass = getTestClass();
+        /*
+          Note we create these test class instances via reflection
+          since JUnit creates a new instance per test.
+        */
+        if (suiteInstance != null) {
+            // Catching the case when parameterized test cases are run: the test class stays the same but the test instances change.
+            if (target instanceof ParameterizedOpenSearchIntegTestCase) {
+                assert suiteInstance instanceof ParameterizedOpenSearchIntegTestCase;
+                if (hasParametersChanged(
+                    (ParameterizedOpenSearchIntegTestCase) suiteInstance,
+                    (ParameterizedOpenSearchIntegTestCase) target
+                )) {
+                    printTestMessage("new instance of parameterized test class, recreating cluster scope", method);
+                    afterClass();
+                    beforeClass();
+                } else {
+                    return; /* same test class instance */
+                }
+            } else {
+                return; /* not a parameterized test */
+            }
+        }
+
+        assert suiteInstance == null;
+        if (isSuiteScopedTest(targetClass)) {
+            suiteInstance = target;
+
+            boolean success = false;
+            try {
+                printTestMessage("setup", method);
+                beforeInternal(target);
+                suiteInstance.setupSuiteScopeCluster();
+                success = true;
+            } finally {
+                if (!success) {
+                    afterClass();
+                }
+            }
+        } else {
+            suiteInstance = null;
+        }
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java
new file mode 100644
index 0000000000000..b31dfa2bdefa5
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java
@@ -0,0 +1,66 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsModule;
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * Base class for running the tests with parameterization using dynamic settings: the cluster will be created once before the test suite and the
+ * settings will be applied dynamically. Note that not all settings can be changed dynamically (consider using {@link ParameterizedStaticSettingsOpenSearchIntegTestCase}
+ * instead).
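+ * <p>
+ * A typical subclass supplies its parameter combinations through a {@code @ParametersFactory} method; a minimal sketch,
+ * mirroring the sample suites added in this change (the concrete setting is just an example):
+ * <pre>{@code
+ * @ParametersFactory
+ * public static Collection<Object[]> parameters() {
+ *     return Arrays.asList(
+ *         new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+ *         new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+ *     );
+ * }
+ * }</pre>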
+ * <p>
+ * Here is a simple illustration of the execution flow per parameter combination:
+ * <ul>
+ * <li><b>suite scope</b>: create cluster -> for each test method { apply settings -> run test method -> unapply settings } -> shutdown cluster</li>
+ * <li><b>test scope</b>: for each test method { create cluster -> apply settings -> run test method -> unapply settings -> shutdown cluster }</li>
+ * </ul>
+ */
+public abstract class ParameterizedDynamicSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase {
+    public ParameterizedDynamicSettingsOpenSearchIntegTestCase(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @Before
+    public void beforeTests() {
+        SettingsModule settingsModule = new SettingsModule(settings);
+        for (String key : settings.keySet()) {
+            assertTrue(
+                settingsModule.getClusterSettings().isDynamicSetting(key) || settingsModule.getIndexScopedSettings().isDynamicSetting(key)
+            );
+        }
+        client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get();
+    }
+
+    @After
+    public void afterTests() {
+        final Settings.Builder settingsToUnset = Settings.builder();
+        settings.keySet().forEach(settingsToUnset::putNull);
+        client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get();
+    }
+
+    @Override
+    boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null) {
+            return false;
+        }
+
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+
+        return true;
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java
index edda6bf5603f7..23316adf6a2d7 100644
--- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java
@@ -9,48 +9,43 @@
 package org.opensearch.test;
 
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.settings.SettingsModule;
-import org.junit.After;
-import org.junit.Before;
 
 import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 
 /**
- * Base class for running the tests with parameterization of the dynamic settings
- * For any class that wants to use parameterization, use @ParametersFactory to generate
- * different params only for dynamic settings. Refer SearchCancellationIT for an example.
- * Note: this doesn't work for the parameterization of feature flag/static settings.
+ * Base class for running the tests with parameterization of the settings.
+ * For any class that wants to use parameterization, use {@link com.carrotsearch.randomizedtesting.annotations.ParametersFactory} to generate
+ * different parameters.
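+ * <p>
+ * Each generated parameter set is passed to the suite constructor as a {@link Settings} instance and kept in the
+ * protected {@code settings} field, so subclasses can inspect it, e.g. via
+ * {@code CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)}.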
+ *
+ * There are two flavors of applying the parameterized settings to the cluster on the suite level:
+ * - static: the cluster will be pre-created with the settings at startup; please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase}. The method
+ * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is used by the test scaffolding to detect when the test suite is instantiated with
+ * the new parameters and the test cluster has to be recreated.
+ * - dynamic: the cluster will be created once before the test suite and the settings will be applied dynamically; please subclass {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}.
+ * Note that not all settings can be changed dynamically.
+ *
+ * If the test suite uses the per-test scope, the cluster will be recreated for each test method (applying static or dynamic settings).
 */
-public abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestCase {
-
-    private final Settings dynamicSettings;
+abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestCase {
+    protected final Settings settings;
 
-    public ParameterizedOpenSearchIntegTestCase(Settings dynamicSettings) {
-        this.dynamicSettings = dynamicSettings;
-    }
-
-    @Before
-    public void beforeTests() {
-        SettingsModule settingsModule = new SettingsModule(dynamicSettings);
-        for (String key : dynamicSettings.keySet()) {
-            assertTrue(
-                settingsModule.getClusterSettings().isDynamicSetting(key) || settingsModule.getIndexScopedSettings().isDynamicSetting(key)
-            );
-        }
-        client().admin().cluster().prepareUpdateSettings().setPersistentSettings(dynamicSettings).get();
-    }
-
-    @After
-    public void afterTests() {
-        final Settings.Builder settingsToUnset = Settings.builder();
-        dynamicSettings.keySet().forEach(settingsToUnset::putNull);
-        client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get();
+    ParameterizedOpenSearchIntegTestCase(Settings settings) {
+        this.settings = settings;
     }
 
     // This method shouldn't be called in setupSuiteScopeCluster(). Only call this method inside single test.
     public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException {
-        if (dynamicSettings.get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).equals("true")) {
+        if (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) {
             indexRandomForMultipleSlices(indices);
         }
     }
+
+    /**
+     * Compares the parameters of the two {@link ParameterizedOpenSearchIntegTestCase} test suite instances.
+     * This method is used by {@link OpenSearchTestClusterRule} to determine when the parameterized test suite is instantiated with
+     * another set of parameters and the test cluster has to be recreated to reflect that.
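+     * <p>
+     * A typical implementation performs the usual identity and class checks and then compares the injected settings,
+     * e.g. {@code Objects.equals(settings, ((ParameterizedStaticSettingsOpenSearchIntegTestCase) obj).settings)}, as
+     * the static settings flavor does.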
+     * @param obj instance of the {@link ParameterizedOpenSearchIntegTestCase} to compare with
+     * @return {@code true} if the parameters of the test suites are the same, {@code false} otherwise
+     */
+    abstract boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj);
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java
new file mode 100644
index 0000000000000..7d2c9ad686a01
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.indices.replication.common.ReplicationType;
+
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
+
+/**
+ * Base class for running the tests with parameterization using static settings: the cluster will be pre-created with the settings at startup; the method
+ * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is used by the test scaffolding to detect when the test suite is instantiated with
+ * the new parameters and the test cluster has to be recreated.
+ * <p>
+ * Here is a simple illustration of the execution flow per parameter combination:
+ * <ul>
+ * <li><b>suite scope</b>: create cluster -> for each test method { run test method } -> shutdown cluster</li>
+ * <li><b>test scope</b>: for each test method { create cluster -> run test method -> shutdown cluster }</li>
+ * </ul>
+ */
+public abstract class ParameterizedStaticSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase {
+
+    protected static final String REMOTE_STORE_REPOSITORY_NAME = "test-remote-store-repo";
+    private Path remoteStoreRepositoryPath;
+    public static final List<Object[]> replicationSettings = Arrays.asList(
+        new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build() },
+        new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() }
+    );
+
+    public ParameterizedStaticSettingsOpenSearchIntegTestCase(Settings nodeSettings) {
+        super(nodeSettings);
+    }
+
+    public static final List<Object[]> remoteStoreSettings = Arrays.asList(
+        new Object[] { Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build() },
+        new Object[] { Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build() }
+    );
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        Settings.Builder builder = Settings.builder();
+        if (REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings)) {
+            if (remoteStoreRepositoryPath == null) {
+                remoteStoreRepositoryPath = randomRepoPath().toAbsolutePath();
+            }
+            builder.put(remoteStoreClusterSettings(REMOTE_STORE_REPOSITORY_NAME, remoteStoreRepositoryPath));
+        }
+        return builder.put(super.nodeSettings(nodeOrdinal)).put(settings).build();
+    }
+
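+    // Effectively an equals() over the injected parameters: suite instances with equal static settings may reuse the
+    // pre-created cluster, while a differing Settings object forces the scaffolding to rebuild it.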
@Override + boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + final ParameterizedStaticSettingsOpenSearchIntegTestCase other = (ParameterizedStaticSettingsOpenSearchIntegTestCase) obj; + return Objects.equals(settings, other.settings); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index 2fb345f73fb06..09a72dcdc3641 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -107,6 +107,7 @@ public class TestSearchContext extends SearchContext { SearchShardTask task; SortAndFormats sort; boolean trackScores = false; + boolean includeNamedQueriesScore = false; int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; ContextIndexSearcher searcher; @@ -409,6 +410,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; diff --git a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java index 8fb9bc5cd7c1c..8ce5afab17c00 100644 --- a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java @@ -359,4 +359,14 @@ public static Version randomPreviousCompatibleVersion(Random random, Version ver // but 7.2.0 for minimum compat return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), getPreviousVersion(version)); } + + /** + * Returns a {@link Version} with a given major, minor and revision version. + * Build version is skipped for the sake of simplicity. + */ + public static Version getVersion(byte major, byte minor, byte revision) { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(revision); + return Version.fromString(sb.toString()); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 44837c37962b4..168fbd5bd0d0a 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -252,7 +252,7 @@ public TimeValue expectedTimeToHeal() { * returns true if some live threads were found. The caller is expected to call this method * until no more "live" are found. 
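     * A typical call site therefore loops, e.g. {@code while (suspendThreads(nodeThreads)) { }}, stopping once a full
     * pass finds no remaining runnable node threads.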
*/ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected boolean suspendThreads(Set<Thread> nodeThreads) { Thread[] allThreads = null; @@ -360,7 +360,7 @@ protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo bl ); } - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected void resumeThreads(Set<Thread> threads) { for (Thread thread : threads) { diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index f123b926f5bad..b1695ff00e0cc 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -42,6 +42,7 @@ import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.PrimaryShardAllocator; import org.opensearch.gateway.ReplicaShardAllocator; +import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; @@ -91,9 +92,12 @@ protected AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardR routing -> currentNodes.get(routing.currentNodeId()), routing -> new NodeGatewayStartedShards( currentNodes.get(routing.currentNodeId()), - routing.allocationId().getId(), - routing.primary(), - getReplicationCheckpoint(shardId, routing.currentNodeId()) + new TransportNodesGatewayStartedShardHelper.GatewayStartedShard( + routing.allocationId().getId(), + routing.primary(), + getReplicationCheckpoint(shardId, routing.currentNodeId()), + null + ) ) ) ); diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 183214c159c14..650558aaa97a6 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -304,6 +304,22 @@ public static void assertHitCount(SearchResponse countResponse, long expectedHit } } + public static void assertHitCount(SearchResponse countResponse, long minHitCount, long maxHitCount) { + final TotalHits totalHits = countResponse.getHits().getTotalHits(); + if (!(totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value >= minHitCount && totalHits.value <= maxHitCount)) { + fail( + "Count is " + + totalHits + + " not between " + + minHitCount + + " and " + + maxHitCount + + " inclusive. 
" + + formatShardStatus(countResponse) + ); + } + } + public static void assertExists(GetResponse response) { String message = String.format(Locale.ROOT, "Expected %s/%s to exist, but does not", response.getIndex(), response.getId()); assertThat(message, response.isExists(), is(true)); @@ -528,6 +544,10 @@ public static Matcher<SearchHit> hasScore(final float score) { return new OpenSearchMatchers.SearchHitHasScoreMatcher(score); } + public static Matcher<SearchHit> hasMatchedQueries(final String[] matchedQueries) { + return new OpenSearchMatchers.SearchHitMatchedQueriesMatcher(matchedQueries); + } + public static <T, V> CombinableMatcher<T> hasProperty(Function<? super T, ? extends V> property, Matcher<V> valueMatcher) { return OpenSearchMatchers.HasPropertyLambdaMatcher.hasProperty(property, valueMatcher); } diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java index 5889b7e269ed2..2be94bd53e3c1 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java @@ -38,6 +38,7 @@ import org.hamcrest.TypeSafeMatcher; import org.hamcrest.core.CombinableMatcher; +import java.util.Arrays; import java.util.function.Function; public class OpenSearchMatchers { @@ -111,6 +112,35 @@ public void describeTo(final Description description) { } } + public static class SearchHitMatchedQueriesMatcher extends TypeSafeMatcher<SearchHit> { + private String[] matchedQueries; + + public SearchHitMatchedQueriesMatcher(String[] matchedQueries) { + this.matchedQueries = matchedQueries; + } + + @Override + protected boolean matchesSafely(SearchHit searchHit) { + String[] searchHitQueries = searchHit.getMatchedQueries(); + if (matchedQueries == null) { + return false; + } + Arrays.sort(searchHitQueries); + Arrays.sort(matchedQueries); + return Arrays.equals(searchHitQueries, matchedQueries); + } + + @Override + public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) { + mismatchDescription.appendText(" matched queries were ").appendValue(Arrays.toString(searchHit.getMatchedQueries())); + } + + @Override + public void describeTo(final Description description) { + description.appendText("searchHit matched queries should be ").appendValue(Arrays.toString(matchedQueries)); + } + } + public static class HasPropertyLambdaMatcher<T, V> extends FeatureMatcher<T, V> { private final Function<? super T, ? 
extends V> property; diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java index dda413ce2818e..4ba130343e889 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java @@ -11,11 +11,17 @@ import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; +import java.io.Closeable; +import java.util.function.Supplier; + /** * Mock {@link Telemetry} implementation for testing. */ @@ -46,6 +52,16 @@ public Counter createUpDownCounter(String name, String description, String unit) return NoopCounter.INSTANCE; } + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + + @Override + public Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags) { + return () -> {}; + } + @Override public void close() { diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 3b64e044e7bf0..e43b0756e2f2b 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -1283,9 +1283,17 @@ public String executor() { Level.TRACE, notSeenReceived ); + final String notSeenResponseSent = ".*\\[internal:testNotSeen].*sent response.*"; + final MockLogAppender.LoggingExpectation notSeenResponseSentExpectation = new MockLogAppender.PatternSeenEventExpectation( + "sent response", + "org.opensearch.transport.TransportService.tracer", + Level.TRACE, + notSeenResponseSent + ); appender.addExpectation(notSeenSentExpectation); appender.addExpectation(notSeenReceivedExpectation); + appender.addExpectation(notSeenResponseSentExpectation); PlainTransportFuture<StringMessageResponse> future = new PlainTransportFuture<>(noopResponseHandler); serviceA.sendRequest(nodeB, "internal:testNotSeen", new StringMessageRequest(""), future); diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6dd14e06248a9 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public ParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..f38c1ecd26429 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..1f9a7cb87ae15 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..36ca14e453158 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..84caebdb4302f --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6df8ad2c27210 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +}